diff --git a/CHANGELOG.md b/CHANGELOG.md
index c8cd4f640..34ae7cb5c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,26 @@
 # Change Log
 
+## [v1.5.2](https://github.com/containous/traefik/tree/v1.5.2) (2018-02-12)
+[All Commits](https://github.com/containous/traefik/compare/v1.5.1...v1.5.2)
+
+**Bug fixes:**
+- **[acme,cluster,kv]** Compress ACME certificates in KV stores. ([#2814](https://github.com/containous/traefik/pull/2814) by [nmengin](https://github.com/nmengin))
+- **[acme]** Traefik still start when Let's encrypt is down ([#2794](https://github.com/containous/traefik/pull/2794) by [Juliens](https://github.com/Juliens))
+- **[docker]** Fix dnsrr endpoint mode excluded when not using swarm LB ([#2795](https://github.com/containous/traefik/pull/2795) by [mmatur](https://github.com/mmatur))
+- **[eureka]** Continue refresh the configuration after a failure. ([#2838](https://github.com/containous/traefik/pull/2838) by [ldez](https://github.com/ldez))
+- **[logs]** Reduce oxy round trip logs to debug. ([#2821](https://github.com/containous/traefik/pull/2821) by [timoreimann](https://github.com/timoreimann))
+- **[websocket]** Fix goroutine leaks in websocket ([#2825](https://github.com/containous/traefik/pull/2825) by [Juliens](https://github.com/Juliens))
+- Hide the pflag error when displaying help. ([#2800](https://github.com/containous/traefik/pull/2800) by [ldez](https://github.com/ldez))
+
+**Documentation:**
+- **[docker]** Explain how to write entrypoints definition in a compose file ([#2834](https://github.com/containous/traefik/pull/2834) by [mmatur](https://github.com/mmatur))
+- **[docker]** Fix typo ([#2813](https://github.com/containous/traefik/pull/2813) by [uschtwill](https://github.com/uschtwill))
+- **[k8s]** typo in "i"ngress annotations. ([#2780](https://github.com/containous/traefik/pull/2780) by [RRAlex](https://github.com/RRAlex))
+- Clarify how setting a frontend priority works ([#2818](https://github.com/containous/traefik/pull/2818) by [sirlatrom](https://github.com/sirlatrom))
+- Fixed typo. ([#2811](https://github.com/containous/traefik/pull/2811) by [sonus21](https://github.com/sonus21))
+- Docs: regex+replacement hints for URL rewriting ([#2802](https://github.com/containous/traefik/pull/2802) by [djeeg](https://github.com/djeeg))
+- Add documentation about entry points definition with CLI. ([#2798](https://github.com/containous/traefik/pull/2798) by [ldez](https://github.com/ldez))
+
 ## [v1.5.1](https://github.com/containous/traefik/tree/v1.5.1) (2018-01-29)
 [All Commits](https://github.com/containous/traefik/compare/v1.5.0...v1.5.1)
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index fa15f85bc..0966c132f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -87,9 +87,11 @@ If you happen to update the provider templates (in `/templates`), you need to ru
 [dep](https://github.com/golang/dep) is not required for building; however, it is necessary to modify dependencies (i.e., add, update, or remove third-party packages)
 
+You need to use [dep](https://github.com/golang/dep) >= 0.4.1.
+
 If you want to add a dependency, use `dep ensure -add` to have [dep](https://github.com/golang/dep) put it into the vendor folder and update the dep manifest/lock files (`Gopkg.toml` and `Gopkg.lock`, respectively).
 
-A following `make prune-dep` run should be triggered to trim down the size of the vendor folder.
+A subsequent `make dep-prune` run should be triggered to trim down the size of the vendor folder.
 The final result must be committed into VCS.
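As a minimal sketch of that workflow (using `github.com/stretchr/testify`, the dependency this diff adds to `Gopkg.toml`, as the example package; exact commands may vary with your setup):

```bash
# Fetch the package into vendor/ and record it in Gopkg.toml / Gopkg.lock.
dep ensure -add github.com/stretchr/testify@v1.2.1

# Trim tests, non-Go files, and unused packages from the vendor folder.
make dep-prune

# Commit everything, including the vendor/ changes.
git add Gopkg.toml Gopkg.lock vendor/
git commit -m "Add github.com/stretchr/testify v1.2.1"
```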
 Here's a full example using dep to add a new dependency:
diff --git a/Gopkg.lock b/Gopkg.lock
index 64fcdb25f..07b526c58 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -310,7 +310,8 @@
 [[projects]]
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
-  revision = "04cdfd42973bb9c8589fd6a731800cf222fde1a9"
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
 
 [[projects]]
   branch = "master"
@@ -966,7 +967,8 @@
 [[projects]]
   name = "github.com/pmezard/go-difflib"
   packages = ["difflib"]
-  revision = "d8ed2627bdf02c080bf22230dbb337003b7aba2d"
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
 
 [[projects]]
   name = "github.com/prometheus/client_golang"
@@ -1046,7 +1048,8 @@
 [[projects]]
   name = "github.com/stretchr/objx"
   packages = ["."]
-  revision = "cbeaeb16a013161a98496fad62933b1d21786672"
+  revision = "facf9a85c22f48d2f52f2380e4efce1768749a89"
+  version = "v0.1"
 
 [[projects]]
   name = "github.com/stretchr/testify"
@@ -1055,7 +1058,8 @@
     "mock",
     "require"
   ]
-  revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
+  revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
+  version = "v1.2.1"
 
 [[projects]]
   branch = "master"
@@ -1492,6 +1496,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "f87576548de74d86b0e93a28551b97317addba5731345338272fdbb8a22ad77f"
+  inputs-digest = "865659187f3824cb57b8ff8805e3114b806c67ee0ffe9cbc6db237e4038edee6"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index e0b4f0913..d14276bad 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -156,6 +156,10 @@
   branch = "master"
   name = "github.com/stvp/go-udp-testing"
 
+[[constraint]]
+  name = "github.com/stretchr/testify"
+  version = "1.2.1"
+
 [[constraint]]
   name = "github.com/uber/jaeger-client-go"
   version = "2.9.0"
@@ -233,3 +237,8 @@
 # ALWAYS keep this override
   name = "github.com/mailgun/timetools"
   revision = "7e6055773c5137efbeb3bd2410d705fe10ab6bfd"
+
+[prune]
+  non-go = true
+  go-tests = true
+  unused-packages = true
diff --git a/Makefile b/Makefile
index 9683dc92d..a8b5ea4b6 100644
--- a/Makefile
+++ b/Makefile
@@ -127,7 +127,11 @@ fmt:
 pull-images:
 	grep --no-filename -E '^\s+image:' ./integration/resources/compose/*.yml | awk '{print $$2}' | sort | uniq | xargs -P 6 -n 1 docker pull
 
-prune-dep:
+dep-ensure:
+	dep ensure -v
+	./script/prune-dep.sh
+
+dep-prune:
 	./script/prune-dep.sh
 
 help: ## this help
diff --git a/build.Dockerfile b/build.Dockerfile
index 006de77c1..083733f03 100644
--- a/build.Dockerfile
+++ b/build.Dockerfile
@@ -11,15 +11,13 @@ RUN go get github.com/containous/go-bindata/... \
 
 # Which docker version to test on
 ARG DOCKER_VERSION=17.03.2
-ARG DEP_VERSION=0.3.2
+ARG DEP_VERSION=0.4.1
 
 # Download dep binary to bin folder in $GOPATH
 RUN mkdir -p /usr/local/bin \
     && curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 \
     && chmod +x /usr/local/bin/dep
 
-
-
 # Download docker
 RUN mkdir -p /usr/local/bin \
     && curl -fL https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}-ce.tgz \
diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md
index 09b4962ed..3248ecc6b 100644
--- a/docs/configuration/acme.md
+++ b/docs/configuration/acme.md
@@ -165,9 +165,26 @@ storage = "acme.json"
 # ...
 ```
 
-File or key used for certificates storage.
+The `storage` option sets where your ACME certificates are stored.
-**WARNING:** If you use Træfik in Docker, you have 2 options:
+There are two kinds of `storage`:
+- a JSON file,
+- a KV store entry.
+
+!!! danger "DEPRECATED"
+    `storage` replaces `storageFile` which is deprecated.
+
+!!! note
+    During Træfik configuration migration from a configuration file to a KV store (thanks to `storeconfig` subcommand as described [here](/user-guide/kv-config/#store-configuration-in-key-value-store)), if ACME certificates have to be migrated too, use both `storageFile` and `storage`.
+
+    - `storageFile` will contain the path to the `acme.json` file to migrate.
+    - `storage` will contain the key where the certificates will be stored.
+
+#### Store data in a file
+
+ACME certificates can be stored in a JSON file with the `600` file mode.
+
+There are two ways to store ACME certificates in a file from Docker:
 
 - create a file on your host and mount it as a volume:
 ```toml
 storage = "acme.json"
 ```
 ```bash
 docker run -v "/my/host/acme.json:acme.json" traefik
 ```
-
 - mount the folder containing the file as a volume
 ```toml
 storage = "/etc/traefik/acme/acme.json"
 ```
 ```bash
 docker run -v "/my/host/acme:/etc/traefik/acme" traefik
 ```
 
-!!! note
-    `storage` replaces `storageFile` which is deprecated.
+!!! warning
+    This file cannot be shared across multiple instances of Træfik at the same time.
+    If you use Træfik in cluster mode, please use [a KV Store entry](/configuration/acme/#storage-kv-entry).
+
+#### Store data in a KV store entry
+
+ACME certificates can be stored in a KV Store entry.
+
+```toml
+storage = "traefik/acme/account"
+```
+
+**This kind of storage is mandatory in cluster mode.**
+
+Because KV stores (like Consul) have a limited entry size, the certificates list is compressed before being stored in a KV store entry.
 
 !!! note
-    During Træfik configuration migration from a configuration file to a KV store (thanks to `storeconfig` subcommand as described [here](/user-guide/kv-config/#store-configuration-in-key-value-store)), if ACME certificates have to be migrated too, use both `storageFile` and `storage`.
-
-    - `storageFile` will contain the path to the `acme.json` file to migrate.
-    - `storage` will contain the key where the certificates will be stored.
+    It's possible to store up to approximately 100 ACME certificates in Consul.
 
 ### `acme.httpChallenge`
 
@@ -288,7 +314,7 @@ Useful if internal networks block external DNS queries.
 
 ### `onDemand` (Deprecated)
 
-!!! warning
+!!! danger "DEPRECATED"
     This option is deprecated.
 
 ```toml
@@ -365,12 +391,12 @@ Each domain & SANs will lead to a certificate request.
 
 ### `dnsProvider` (Deprecated)
 
-!!! warning
+!!! danger "DEPRECATED"
     This option is deprecated.
     Please refer to [DNS challenge provider section](/configuration/acme/#provider)
 
 ### `delayDontCheckDNS` (Deprecated)
 
-!!! warning
+!!! danger "DEPRECATED"
     This option is deprecated.
     Please refer to [DNS challenge delayBeforeCheck section](/configuration/acme/#delaybeforecheck)
 
diff --git a/docs/configuration/entrypoints.md b/docs/configuration/entrypoints.md
index 1280a8060..fa0b7c9bb 100644
--- a/docs/configuration/entrypoints.md
+++ b/docs/configuration/entrypoints.md
@@ -79,7 +79,23 @@ For more information about the CLI, see the documentation about [Traefik command]
 Whitespace is used as option separator and `,` is used as value separator for the list.
 The names of the options are case-insensitive.
-All available options:
+In a compose file, the entrypoint syntax is different:
+
+```yaml
+traefik:
+  image: traefik
+  command:
+    - --defaultentrypoints=powpow
+    - "--entryPoints=Name:powpow Address::42 Compress:true"
+```
+or
+```yaml
+traefik:
+  image: traefik
+  command: --defaultentrypoints=powpow --entryPoints='Name:powpow Address::42 Compress:true'
+```
+
+#### All available options:
 
 ```ini
 Name:foo
@@ -223,9 +239,8 @@ In the example below both `snitest.com` and `snitest.org` will require client ce
 ```
 
 !!! note
-
-The deprecated argument `ClientCAFiles` allows adding Client CA files which are mandatory.
-If this parameter exists, the new ones are not checked.
+    The deprecated argument `ClientCAFiles` allows adding Client CA files which are mandatory.
+    If this parameter exists, the new ones are not checked.
 
 ## Authentication
 
diff --git a/docs/user-guide/cluster-docker-consul.md b/docs/user-guide/cluster-docker-consul.md
index 4fb9027f1..eb506956d 100644
--- a/docs/user-guide/cluster-docker-consul.md
+++ b/docs/user-guide/cluster-docker-consul.md
@@ -94,8 +94,8 @@ services:
     image: traefik:1.5
     command:
       - "--api"
-      - "--entrypoints='Name:http Address::80 Redirect.EntryPoint:https'"
-      - "--entrypoints='Name:https Address::443 TLS'"
+      - "--entrypoints=Name:http Address::80 Redirect.EntryPoint:https"
+      - "--entrypoints=Name:https Address::443 TLS"
       - "--defaultentrypoints=http,https"
       - "--acme"
       - "--acme.storage=/etc/traefik/acme/acme.json"
@@ -204,8 +204,8 @@ services:
     command:
       - "storeconfig"
       - "--api"
-      - "--entrypoints='Name:http Address::80 Redirect.EntryPoint:https'"
-      - "--entrypoints='Name:https Address::443 TLS'"
+      - "--entrypoints=Name:http Address::80 Redirect.EntryPoint:https"
+      - "--entrypoints=Name:https Address::443 TLS"
       - "--defaultentrypoints=http,https"
       - "--acme"
       - "--acme.storage=traefik/acme/account"
diff --git a/docs/user-guide/cluster.md b/docs/user-guide/cluster.md
index 370add8f8..111527641 100644
--- a/docs/user-guide/cluster.md
+++ b/docs/user-guide/cluster.md
@@ -23,3 +23,11 @@ A Træfik cluster is based on a manager/worker model.
 
 When starting, Træfik will elect a manager.
 If this instance fails, another manager will be automatically elected.
+
+## Træfik cluster and Let's Encrypt
+
+**In cluster mode, ACME certificates have to be stored in [a KV Store entry](/configuration/acme/#storage-kv-entry).**
+
+Thanks to the Træfik cluster mode algorithm (based on [the Raft Consensus Algorithm](https://raft.github.io/)), only one instance will contact Let's Encrypt to solve the challenges.
+
+The other instances will get the ACME certificates from the KV Store entry.
\ No newline at end of file
diff --git a/examples/acme/compose-acme.yml b/examples/acme/compose-acme.yml
index c3c6317e3..174da6227 100644
--- a/examples/acme/compose-acme.yml
+++ b/examples/acme/compose-acme.yml
@@ -29,6 +29,8 @@ services :
       - bhsm
       - bmysql
       - brabbitmq
+    volumes:
+      - "./rate-limit-policies.yml:/go/src/github.com/letsencrypt/boulder/test/rate-limit-policies.yml:ro"
 
   bhsm:
     image: letsencrypt/boulder-tools:2016-11-02
diff --git a/examples/acme/rate-limit-policies.yml b/examples/acme/rate-limit-policies.yml
new file mode 100644
index 000000000..80431a773
--- /dev/null
+++ b/examples/acme/rate-limit-policies.yml
@@ -0,0 +1,42 @@
+totalCertificates:
+  window: 1h
+  threshold: 100000
+certificatesPerName:
+  window: 1h
+  threshold: 100000
+  overrides:
+    ratelimit.me: 1
+    lim.it: 0
+    # Hostnames used by the letsencrypt client integration test.
+ le.wtf: 10000 + le1.wtf: 10000 + le2.wtf: 10000 + le3.wtf: 10000 + nginx.wtf: 10000 + good-caa-reserved.com: 10000 + bad-caa-reserved.com: 10000 + ecdsa.le.wtf: 10000 + must-staple.le.wtf: 10000 + registrationOverrides: + 101: 1000 +registrationsPerIP: + window: 1h + threshold: 100000 + overrides: + 127.0.0.1: 1000000 +pendingAuthorizationsPerAccount: + window: 1h + threshold: 100000 +certificatesPerFQDNSet: + window: 1h + threshold: 100000 + overrides: + le.wtf: 10000 + le1.wtf: 10000 + le2.wtf: 10000 + le3.wtf: 10000 + le.wtf,le1.wtf: 10000 + good-caa-reserved.com: 10000 + nginx.wtf: 10000 + ecdsa.le.wtf: 10000 + must-staple.le.wtf: 10000 diff --git a/examples/cluster/docker-compose.yml b/examples/cluster/docker-compose.yml index 0f9640f6b..5a6d53718 100644 --- a/examples/cluster/docker-compose.yml +++ b/examples/cluster/docker-compose.yml @@ -73,6 +73,8 @@ services: - bhsm - bmysql - brabbitmq + volumes: + - "./rate-limit-policies.yml:/go/src/github.com/letsencrypt/boulder/test/rate-limit-policies.yml:ro" networks: net: ipv4_address: 10.0.1.3 diff --git a/examples/cluster/rate-limit-policies.yml b/examples/cluster/rate-limit-policies.yml new file mode 100644 index 000000000..80431a773 --- /dev/null +++ b/examples/cluster/rate-limit-policies.yml @@ -0,0 +1,42 @@ +totalCertificates: + window: 1h + threshold: 100000 +certificatesPerName: + window: 1h + threshold: 100000 + overrides: + ratelimit.me: 1 + lim.it: 0 + # Hostnames used by the letsencrypt client integration test. + le.wtf: 10000 + le1.wtf: 10000 + le2.wtf: 10000 + le3.wtf: 10000 + nginx.wtf: 10000 + good-caa-reserved.com: 10000 + bad-caa-reserved.com: 10000 + ecdsa.le.wtf: 10000 + must-staple.le.wtf: 10000 + registrationOverrides: + 101: 1000 +registrationsPerIP: + window: 1h + threshold: 100000 + overrides: + 127.0.0.1: 1000000 +pendingAuthorizationsPerAccount: + window: 1h + threshold: 100000 +certificatesPerFQDNSet: + window: 1h + threshold: 100000 + overrides: + le.wtf: 10000 + le1.wtf: 10000 + le2.wtf: 10000 + le3.wtf: 10000 + le.wtf,le1.wtf: 10000 + good-caa-reserved.com: 10000 + nginx.wtf: 10000 + ecdsa.le.wtf: 10000 + must-staple.le.wtf: 10000 diff --git a/examples/cluster/traefik.toml.tmpl b/examples/cluster/traefik.toml.tmpl index 891e994b1..478027f00 100644 --- a/examples/cluster/traefik.toml.tmpl +++ b/examples/cluster/traefik.toml.tmpl @@ -19,8 +19,7 @@ caServer = "http://traefik.boulder.com:4000/directory" entryPoint="http" -[web] -address = ":8080" +[api] [docker] endpoint = "unix:///var/run/docker.sock" diff --git a/provider/eureka/eureka.go b/provider/eureka/eureka.go index 0329cf42b..37cd4a69c 100644 --- a/provider/eureka/eureka.go +++ b/provider/eureka/eureka.go @@ -55,7 +55,7 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s configuration, err := p.buildConfiguration() if err != nil { log.Errorf("Failed to refresh Provider configuration, error: %s", err) - return + continue } configurationChan <- types.ConfigMessage{ diff --git a/script/prune-dep.sh b/script/prune-dep.sh index 62a68a784..0ca0861ee 100755 --- a/script/prune-dep.sh +++ b/script/prune-dep.sh @@ -6,8 +6,6 @@ set -o nounset echo "Prune dependencies" -dep prune - find vendor -name '*_test.go' -exec rm {} \; find vendor -type f \( ! -iname 'licen[cs]e*' \ @@ -33,3 +31,28 @@ find vendor -type f \( ! -iname 'licen[cs]e*' \ -a ! -iname '*.hpp' \ -a ! -iname '*.hxx' \ -a ! -iname '*.s' \) -exec rm -f {} + + +find -type d \( -iname '*Godeps*' \) -exec rm -rf {} + + +find vendor -type l \( ! 
-iname 'licen[cs]e*' \ + -a ! -iname '*notice*' \ + -a ! -iname '*patent*' \ + -a ! -iname '*copying*' \ + -a ! -iname '*unlicense*' \ + -a ! -iname '*copyright*' \ + -a ! -iname '*copyleft*' \ + -a ! -iname '*legal*' \ + -a ! -iname 'disclaimer*' \ + -a ! -iname 'third-party*' \ + -a ! -iname 'thirdparty*' \ + -a ! -iname '*.go' \ + -a ! -iname '*.c' \ + -a ! -iname '*.S' \ + -a ! -iname '*.cc' \ + -a ! -iname '*.cpp' \ + -a ! -iname '*.cxx' \ + -a ! -iname '*.h' \ + -a ! -iname '*.hh' \ + -a ! -iname '*.hpp' \ + -a ! -iname '*.hxx' \ + -a ! -iname '*.s' \) -exec rm -f {} + \ No newline at end of file diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go deleted file mode 100644 index 6ba428dc6..000000000 --- a/vendor/cloud.google.com/go/cloud.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cloud is the root of the packages used to access Google Cloud -// Services. See https://godoc.org/cloud.google.com/go for a full list -// of sub-packages. -// -// This package documents how to authorize and authenticate the sub packages. -package cloud // import "cloud.google.com/go" diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING new file mode 100644 index 000000000..5a8e33254 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING new file mode 100644 index 000000000..5a8e33254 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. 
+ diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING new file mode 100644 index 000000000..5a8e33254 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/vendor/github.com/JamesClonk/vultr/vultr.go b/vendor/github.com/JamesClonk/vultr/vultr.go deleted file mode 100644 index 17b179547..000000000 --- a/vendor/github.com/JamesClonk/vultr/vultr.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import ( - "os" - - "github.com/JamesClonk/vultr/cmd" -) - -func main() { - cli := cmd.NewCLI() - cli.RegisterCommands() - cli.Run(os.Args) -} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/apache/thrift/contrib/fb303/LICENSE b/vendor/github.com/apache/thrift/contrib/fb303/LICENSE new file mode 100644 index 000000000..4eacb6431 --- /dev/null +++ b/vendor/github.com/apache/thrift/contrib/fb303/LICENSE @@ -0,0 +1,16 @@ +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. 
You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. diff --git a/vendor/github.com/apache/thrift/debian/copyright b/vendor/github.com/apache/thrift/debian/copyright new file mode 100644 index 000000000..850643c9a --- /dev/null +++ b/vendor/github.com/apache/thrift/debian/copyright @@ -0,0 +1,129 @@ +This package was debianized by Thrift Developer's . + + +This package and the Debian packaging is licensed under the Apache License, +see `/usr/share/common-licenses/Apache-2.0'. + +The following information was copied from Apache Thrift LICENSE file. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. + + +-------------------------------------------------- +The following files contain some portions of code contributed under +the Thrift Software License (see doc/old-thrift-license.txt), and relicensed +under the Apache 2.0 License: + + compiler/cpp/Makefile.am + compiler/cpp/src/generate/t_cocoa_generator.cc + compiler/cpp/src/generate/t_cpp_generator.cc + compiler/cpp/src/generate/t_csharp_generator.cc + compiler/cpp/src/generate/t_erl_generator.cc + compiler/cpp/src/generate/t_hs_generator.cc + compiler/cpp/src/generate/t_java_generator.cc + compiler/cpp/src/generate/t_ocaml_generator.cc + compiler/cpp/src/generate/t_perl_generator.cc + compiler/cpp/src/generate/t_php_generator.cc + compiler/cpp/src/generate/t_py_generator.cc + compiler/cpp/src/generate/t_rb_generator.cc + compiler/cpp/src/generate/t_st_generator.cc + compiler/cpp/src/generate/t_xsd_generator.cc + compiler/cpp/src/main.cc + compiler/cpp/src/parse/t_field.h + compiler/cpp/src/parse/t_program.h + compiler/cpp/src/platform.h + compiler/cpp/src/thriftl.ll + compiler/cpp/src/thrifty.yy + lib/csharp/src/Protocol/TBinaryProtocol.cs + lib/csharp/src/Protocol/TField.cs + lib/csharp/src/Protocol/TList.cs + lib/csharp/src/Protocol/TMap.cs + lib/csharp/src/Protocol/TMessage.cs + lib/csharp/src/Protocol/TMessageType.cs + lib/csharp/src/Protocol/TProtocol.cs + lib/csharp/src/Protocol/TProtocolException.cs + lib/csharp/src/Protocol/TProtocolFactory.cs + lib/csharp/src/Protocol/TProtocolUtil.cs + lib/csharp/src/Protocol/TSet.cs + lib/csharp/src/Protocol/TStruct.cs + lib/csharp/src/Protocol/TType.cs + lib/csharp/src/Server/TServer.cs + lib/csharp/src/Server/TSimpleServer.cs + lib/csharp/src/Server/TThreadPoolServer.cs + lib/csharp/src/TApplicationException.cs + lib/csharp/src/Thrift.csproj + lib/csharp/src/Thrift.sln + lib/csharp/src/TProcessor.cs + lib/csharp/src/Transport/TServerSocket.cs + lib/csharp/src/Transport/TServerTransport.cs + lib/csharp/src/Transport/TSocket.cs + lib/csharp/src/Transport/TStreamTransport.cs + lib/csharp/src/Transport/TTransport.cs + 
lib/csharp/src/Transport/TTransportException.cs + lib/csharp/src/Transport/TTransportFactory.cs + lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs + lib/csharp/ThriftMSBuildTask/ThriftBuild.cs + lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj + lib/rb/lib/thrift.rb + lib/st/README + lib/st/thrift.st + test/OptionalRequiredTest.cpp + test/OptionalRequiredTest.thrift + test/ThriftTest.thrift + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the compiler/cpp/src/md5.[ch] components: + +/* + Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + L. Peter Deutsch + ghost@aladdin.com + + */ + +--------------------------------------------------- +For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki, +lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components: + Copyright (C) 1999 - 2007 Markus Mottl + +Licensed under the terms of the GNU Lesser General Public License 2.1 +(see doc/lgpl-2.1.txt for the full terms of this license) diff --git a/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER b/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER new file mode 100644 index 000000000..4eacb6431 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER @@ -0,0 +1,16 @@ +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. 
diff --git a/vendor/github.com/apache/thrift/lib/hs/LICENSE b/vendor/github.com/apache/thrift/lib/hs/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/hs/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/sdk.go b/vendor/github.com/aws/aws-sdk-go/sdk.go deleted file mode 100644 index afa465a22..000000000 --- a/vendor/github.com/aws/aws-sdk-go/sdk.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package sdk is the official AWS SDK for the Go programming language. -// -// See our Developer Guide for information for on getting started and using -// the SDK. -// -// https://github.com/aws/aws-sdk-go/wiki -package sdk diff --git a/vendor/github.com/aws/aws-sdk-go/service/generate.go b/vendor/github.com/aws/aws-sdk-go/service/generate.go deleted file mode 100644 index 3ffc9fcc5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/generate.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package service contains automatically generated AWS clients. -package service - -//go:generate go run -tags codegen ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json -//go:generate gofmt -s -w ../service diff --git a/vendor/github.com/containerd/continuity/context.go b/vendor/github.com/containerd/continuity/context.go deleted file mode 100644 index 2667a97df..000000000 --- a/vendor/github.com/containerd/continuity/context.go +++ /dev/null @@ -1,641 +0,0 @@ -package continuity - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/containerd/continuity/devices" - driverpkg "github.com/containerd/continuity/driver" - "github.com/containerd/continuity/pathdriver" - "github.com/opencontainers/go-digest" -) - -var ( - ErrNotFound = fmt.Errorf("not found") - ErrNotSupported = fmt.Errorf("not supported") -) - -// Context represents a file system context for accessing resources. The -// responsibility of the context is to convert system specific resources to -// generic Resource objects. Most of this is safe path manipulation, as well -// as extraction of resource details. -type Context interface { - Apply(Resource) error - Verify(Resource) error - Resource(string, os.FileInfo) (Resource, error) - Walk(filepath.WalkFunc) error -} - -// SymlinkPath is intended to give the symlink target value -// in a root context. Target and linkname are absolute paths -// not under the given root. -type SymlinkPath func(root, linkname, target string) (string, error) - -type ContextOptions struct { - Digester Digester - Driver driverpkg.Driver - PathDriver pathdriver.PathDriver - Provider ContentProvider -} - -// context represents a file system context for accessing resources. -// Generally, all path qualified access and system considerations should land -// here. -type context struct { - driver driverpkg.Driver - pathDriver pathdriver.PathDriver - root string - digester Digester - provider ContentProvider -} - -// NewContext returns a Context associated with root. The default driver will -// be used, as returned by NewDriver. -func NewContext(root string) (Context, error) { - return NewContextWithOptions(root, ContextOptions{}) -} - -// NewContextWithOptions returns a Context associate with the root. 
-func NewContextWithOptions(root string, options ContextOptions) (Context, error) { - // normalize to absolute path - pathDriver := options.PathDriver - if pathDriver == nil { - pathDriver = pathdriver.LocalPathDriver - } - - root = pathDriver.FromSlash(root) - root, err := pathDriver.Abs(pathDriver.Clean(root)) - if err != nil { - return nil, err - } - - driver := options.Driver - if driver == nil { - driver, err = driverpkg.NewSystemDriver() - if err != nil { - return nil, err - } - } - - digester := options.Digester - if digester == nil { - digester = simpleDigester{digest.Canonical} - } - - // Check the root directory. Need to be a little careful here. We are - // allowing a link for now, but this may have odd behavior when - // canonicalizing paths. As long as all files are opened through the link - // path, this should be okay. - fi, err := driver.Stat(root) - if err != nil { - return nil, err - } - - if !fi.IsDir() { - return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid} - } - - return &context{ - root: root, - driver: driver, - pathDriver: pathDriver, - digester: digester, - provider: options.Provider, - }, nil -} - -// Resource returns the resource as path p, populating the entry with info -// from fi. The path p should be the path of the resource in the context, -// typically obtained through Walk or from the value of Resource.Path(). If fi -// is nil, it will be resolved. -func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) { - fp, err := c.fullpath(p) - if err != nil { - return nil, err - } - - if fi == nil { - fi, err = c.driver.Lstat(fp) - if err != nil { - return nil, err - } - } - - base, err := newBaseResource(p, fi) - if err != nil { - return nil, err - } - - base.xattrs, err = c.resolveXAttrs(fp, fi, base) - if err == ErrNotSupported { - log.Printf("resolving xattrs on %s not supported", fp) - } else if err != nil { - return nil, err - } - - // TODO(stevvooe): Handle windows alternate data streams. - - if fi.Mode().IsRegular() { - dgst, err := c.digest(p) - if err != nil { - return nil, err - } - - return newRegularFile(*base, base.paths, fi.Size(), dgst) - } - - if fi.Mode().IsDir() { - return newDirectory(*base) - } - - if fi.Mode()&os.ModeSymlink != 0 { - // We handle relative links vs absolute links by including a - // beginning slash for absolute links. Effectively, the bundle's - // root is treated as the absolute link anchor. - target, err := c.driver.Readlink(fp) - if err != nil { - return nil, err - } - - return newSymLink(*base, target) - } - - if fi.Mode()&os.ModeNamedPipe != 0 { - return newNamedPipe(*base, base.paths) - } - - if fi.Mode()&os.ModeDevice != 0 { - deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver) - if !ok { - log.Printf("device extraction not supported %s", fp) - return nil, ErrNotSupported - } - - // character and block devices merely need to recover the - // major/minor device number. 
- major, minor, err := deviceDriver.DeviceInfo(fi) - if err != nil { - return nil, err - } - - return newDevice(*base, base.paths, major, minor) - } - - log.Printf("%q (%v) is not supported", fp, fi.Mode()) - return nil, ErrNotFound -} - -func (c *context) verifyMetadata(resource, target Resource) error { - if target.Mode() != resource.Mode() { - return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode()) - } - - if target.UID() != resource.UID() { - return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.GID()) - } - - if target.GID() != resource.GID() { - return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), target.GID()) - } - - if xattrer, ok := resource.(XAttrer); ok { - txattrer, tok := target.(XAttrer) - if !tok { - return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path()) - } - - // For xattrs, only ensure that we have those defined in the resource - // and their values match. We can ignore other xattrs. In other words, - // we only verify that target has the subset defined by resource. - txattrs := txattrer.XAttrs() - for attr, value := range xattrer.XAttrs() { - tvalue, ok := txattrs[attr] - if !ok { - return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr) - } - - if !bytes.Equal(value, tvalue) { - return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path()) - } - } - } - - switch r := resource.(type) { - case RegularFile: - // TODO(stevvooe): Another reason to use a record-based approach. We - // have to do another type switch to get this to work. This could be - // fixed with an Equal function, but let's study this a little more to - // be sure. - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - if t.Size() != r.Size() { - return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size()) - } - case Directory: - t, ok := target.(Directory) - if !ok { - return fmt.Errorf("resource %q target not a directory", t.Path()) - } - case SymLink: - t, ok := target.(SymLink) - if !ok { - return fmt.Errorf("resource %q target not a symlink", t.Path()) - } - - if t.Target() != r.Target() { - return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target()) - } - case Device: - t, ok := target.(Device) - if !ok { - return fmt.Errorf("resource %q is not a device", t.Path()) - } - - if t.Major() != r.Major() || t.Minor() != r.Minor() { - return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor()) - } - case NamedPipe: - t, ok := target.(NamedPipe) - if !ok { - return fmt.Errorf("resource %q is not a named pipe", t.Path()) - } - default: - return fmt.Errorf("cannot verify resource: %v", resource) - } - - return nil -} - -// Verify the resource in the context. An error will be returned a discrepancy -// is found. 
-func (c *context) Verify(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - fi, err := c.driver.Lstat(fp) - if err != nil { - return err - } - - target, err := c.Resource(resource.Path(), fi) - if err != nil { - return err - } - - if target.Path() != resource.Path() { - return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path()) - } - - if err := c.verifyMetadata(resource, target); err != nil { - return err - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - hardlinkKey, err := newHardlinkKey(fi) - if err == errNotAHardLink { - if len(h.Paths()) > 1 { - return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path()) - } - } else if err != nil { - return err - } - - for _, path := range h.Paths()[1:] { - fpLink, err := c.fullpath(path) - if err != nil { - return err - } - - fiLink, err := c.driver.Lstat(fpLink) - if err != nil { - return err - } - - targetLink, err := c.Resource(path, fiLink) - if err != nil { - return err - } - - hardlinkKeyLink, err := newHardlinkKey(fiLink) - if err != nil { - return err - } - - if hardlinkKeyLink != hardlinkKey { - return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path()) - } - - if err := c.verifyMetadata(resource, targetLink); err != nil { - return err - } - } - } - - switch r := resource.(type) { - case RegularFile: - t, ok := target.(RegularFile) - if !ok { - return fmt.Errorf("resource %q target not a regular file", r.Path()) - } - - // TODO(stevvooe): This may need to get a little more sophisticated - // for digest comparison. We may want to actually calculate the - // provided digests, rather than the implementations having an - // overlap. - if !digestsMatch(t.Digests(), r.Digests()) { - return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests()) - } - } - - return nil -} - -func (c *context) checkoutFile(fp string, rf RegularFile) error { - if c.provider == nil { - return fmt.Errorf("no file provider") - } - var ( - r io.ReadCloser - err error - ) - for _, dgst := range rf.Digests() { - r, err = c.provider.Reader(dgst) - if err == nil { - break - } - } - if err != nil { - return fmt.Errorf("file content could not be provided: %v", err) - } - defer r.Close() - - return atomicWriteFile(fp, r, rf) -} - -// Apply the resource to the contexts. An error will be returned if the -// operation fails. Depending on the resource type, the resource may be -// created. For resource that cannot be resolved, an error will be returned. 
-func (c *context) Apply(resource Resource) error { - fp, err := c.fullpath(resource.Path()) - if err != nil { - return err - } - - if !strings.HasPrefix(fp, c.root) { - return fmt.Errorf("resource %v escapes root", resource) - } - - var chmod = true - fi, err := c.driver.Lstat(fp) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - switch r := resource.(type) { - case RegularFile: - if fi == nil { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - chmod = false - } else { - if !fi.Mode().IsRegular() { - return fmt.Errorf("file %q should be a regular file, but is not", resource.Path()) - } - if fi.Size() != r.Size() { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - } else { - for _, dgst := range r.Digests() { - f, err := os.Open(fp) - if err != nil { - return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err) - } - compared, err := dgst.Algorithm().FromReader(f) - if err == nil && dgst != compared { - if err := c.checkoutFile(fp, r); err != nil { - return fmt.Errorf("error checking out file %q: %v", resource.Path(), err) - } - break - } - if err1 := f.Close(); err == nil { - err = err1 - } - if err != nil { - return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err) - } - } - } - } - case Directory: - if fi == nil { - if err := c.driver.Mkdir(fp, resource.Mode()); err != nil { - return err - } - } else if !fi.Mode().IsDir() { - return fmt.Errorf("%q should be a directory, but is not", resource.Path()) - } - - case SymLink: - var target string // only possibly set if target resource is a symlink - - if fi != nil { - if fi.Mode()&os.ModeSymlink != 0 { - target, err = c.driver.Readlink(fp) - if err != nil { - return err - } - } - } - - if target != r.Target() { - if fi != nil { - if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory? - return err - } - } - - if err := c.driver.Symlink(r.Target(), fp); err != nil { - return err - } - } - - // NOTE(stevvooe): Chmod on symlink is not supported on linux. We - // may want to maintain support for other platforms that have it. 
- chmod = false - - case Device: - if fi == nil { - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } else if (fi.Mode() & os.ModeDevice) == 0 { - return fmt.Errorf("%q should be a device, but is not", resource.Path()) - } else { - major, minor, err := devices.DeviceInfo(fi) - if err != nil { - return err - } - if major != r.Major() || minor != r.Minor() { - if err := c.driver.Remove(fp); err != nil { - return err - } - - if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil { - return err - } - } - } - - case NamedPipe: - if fi == nil { - if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil { - return err - } - } else if (fi.Mode() & os.ModeNamedPipe) == 0 { - return fmt.Errorf("%q should be a named pipe, but is not", resource.Path()) - } - } - - if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable { - for _, path := range h.Paths() { - if path == resource.Path() { - continue - } - - lp, err := c.fullpath(path) - if err != nil { - return err - } - - if _, fi := c.driver.Lstat(lp); fi == nil { - c.driver.Remove(lp) - } - if err := c.driver.Link(fp, lp); err != nil { - return err - } - } - } - - // Update filemode if file was not created - if chmod { - if err := c.driver.Lchmod(fp, resource.Mode()); err != nil { - return err - } - } - - if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil { - return err - } - - if xattrer, ok := resource.(XAttrer); ok { - // For xattrs, only ensure that we have those defined in the resource - // and their values are set. We can ignore other xattrs. In other words, - // we only set xattres defined by resource but never remove. - - if _, ok := resource.(SymLink); ok { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path()) - } - if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } else { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - return fmt.Errorf("unsupported xattr for resource %q", resource.Path()) - } - if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil { - return err - } - } - } - - return nil -} - -// Walk provides a convenience function to call filepath.Walk correctly for -// the context. Otherwise identical to filepath.Walk, the path argument is -// corrected to be contained within the context. -func (c *context) Walk(fn filepath.WalkFunc) error { - return c.pathDriver.Walk(c.root, func(p string, fi os.FileInfo, err error) error { - contained, err := c.contain(p) - return fn(contained, fi, err) - }) -} - -// fullpath returns the system path for the resource, joined with the context -// root. The path p must be a part of the context. -func (c *context) fullpath(p string) (string, error) { - p = c.pathDriver.Join(c.root, p) - if !strings.HasPrefix(p, c.root) { - return "", fmt.Errorf("invalid context path") - } - - return p, nil -} - -// contain cleans and santizes the filesystem path p to be an absolute path, -// effectively relative to the context root. -func (c *context) contain(p string) (string, error) { - sanitized, err := c.pathDriver.Rel(c.root, p) - if err != nil { - return "", err - } - - // ZOMBIES(stevvooe): In certain cases, we may want to remap these to a - // "containment error", so the caller can decide what to do. 
- return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil -} - -// digest returns the digest of the file at path p, relative to the root. -func (c *context) digest(p string) (digest.Digest, error) { - f, err := c.driver.Open(c.pathDriver.Join(c.root, p)) - if err != nil { - return "", err - } - defer f.Close() - - return c.digester.Digest(f) -} - -// resolveXAttrs attempts to resolve the extended attributes for the resource -// at the path fp, which is the full path to the resource. If the resource -// cannot have xattrs, nil will be returned. -func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) { - if fi.Mode().IsRegular() || fi.Mode().IsDir() { - xattrDriver, ok := c.driver.(driverpkg.XAttrDriver) - if !ok { - log.Println("xattr extraction not supported") - return nil, ErrNotSupported - } - - return xattrDriver.Getxattr(fp) - } - - if fi.Mode()&os.ModeSymlink != 0 { - lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver) - if !ok { - log.Println("xattr extraction for symlinks not supported") - return nil, ErrNotSupported - } - - return lxattrDriver.LGetxattr(fp) - } - - return nil, nil -} diff --git a/vendor/github.com/containerd/continuity/digests.go b/vendor/github.com/containerd/continuity/digests.go deleted file mode 100644 index 355b08039..000000000 --- a/vendor/github.com/containerd/continuity/digests.go +++ /dev/null @@ -1,88 +0,0 @@ -package continuity - -import ( - "fmt" - "io" - "sort" - - "github.com/opencontainers/go-digest" -) - -// Digester produces a digest for a given read stream -type Digester interface { - Digest(io.Reader) (digest.Digest, error) -} - -// ContentProvider produces a read stream for a given digest -type ContentProvider interface { - Reader(digest.Digest) (io.ReadCloser, error) -} - -type simpleDigester struct { - algorithm digest.Algorithm -} - -func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) { - digester := sd.algorithm.Digester() - - if _, err := io.Copy(digester.Hash(), r); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// uniqifyDigests sorts and uniqifies the provided digest, ensuring that the -// digests are not repeated and no two digests with the same algorithm have -// different values. Because a stable sort is used, this has the effect of -// "zipping" digest collections from multiple resources. -func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) { - sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here. - seen := map[digest.Digest]struct{}{} - algs := map[digest.Algorithm][]digest.Digest{} // detect different digests. - - var out []digest.Digest - // uniqify the digests - for _, d := range digests { - if _, ok := seen[d]; ok { - continue - } - - seen[d] = struct{}{} - algs[d.Algorithm()] = append(algs[d.Algorithm()], d) - - if len(algs[d.Algorithm()]) > 1 { - return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm()) - } - - out = append(out, d) - } - - return out, nil -} - -// digestsMatch compares the two sets of digests to see if they match. -func digestsMatch(as, bs []digest.Digest) bool { - all := append(as, bs...) - - uniqified, err := uniqifyDigests(all...) - if err != nil { - // the only error uniqifyDigests returns is when the digests disagree. - return false - } - - disjoint := len(as) + len(bs) - if len(uniqified) == disjoint { - // if these two sets have the same cardinality, we know both sides - // didn't share any digests. 
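> The `uniqifyDigests`/`digestsMatch` pair above encodes a two-part property: no algorithm may report two different values, and the sets must share at least one digest. A simplified restatement of that property (not the vendored implementation, which detects sharing via set cardinality after deduplication):

```go
package example

import "github.com/opencontainers/go-digest"

// sameContent reports whether two digest sets could describe the same
// content: consistent per-algorithm values, plus at least one common digest.
func sameContent(a, b []digest.Digest) bool {
	byAlg := map[digest.Algorithm]digest.Digest{}
	for _, d := range append(append([]digest.Digest{}, a...), b...) {
		if prev, ok := byAlg[d.Algorithm()]; ok && prev != d {
			return false // conflicting values for one algorithm
		}
		byAlg[d.Algorithm()] = d
	}
	seen := map[digest.Digest]bool{}
	for _, d := range a {
		seen[d] = true
	}
	for _, d := range b {
		if seen[d] {
			return true // shared digest, no conflicts
		}
	}
	return false // disjoint sets prove nothing about the content
}
```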
- return false - } - - return true -} - -type digestSlice []digest.Digest - -func (p digestSlice) Len() int { return len(p) } -func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] } -func (p digestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/containerd/continuity/groups_unix.go b/vendor/github.com/containerd/continuity/groups_unix.go deleted file mode 100644 index e15c14ff8..000000000 --- a/vendor/github.com/containerd/continuity/groups_unix.go +++ /dev/null @@ -1,113 +0,0 @@ -package continuity - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// TODO(stevvooe): This needs a lot of work before we can call it useful. - -type groupIndex struct { - byName map[string]*group - byGID map[int]*group -} - -func getGroupIndex() (*groupIndex, error) { - f, err := os.Open("/etc/group") - if err != nil { - return nil, err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return nil, err - } - - return newGroupIndex(groups), nil -} - -func newGroupIndex(groups []group) *groupIndex { - gi := &groupIndex{ - byName: make(map[string]*group), - byGID: make(map[int]*group), - } - - for i, group := range groups { - gi.byGID[group.gid] = &groups[i] - gi.byName[group.name] = &groups[i] - } - - return gi -} - -type group struct { - name string - gid int - members []string -} - -func getGroupName(gid int) (string, error) { - f, err := os.Open("/etc/group") - if err != nil { - return "", err - } - defer f.Close() - - groups, err := parseGroups(f) - if err != nil { - return "", err - } - - for _, group := range groups { - if group.gid == gid { - return group.name, nil - } - } - - return "", fmt.Errorf("no group for gid") -} - -// parseGroups parses an /etc/group file for group names, ids and membership. -// This is unix specific. -func parseGroups(rd io.Reader) ([]group, error) { - var groups []group - scanner := bufio.NewScanner(rd) - - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "#") { - continue // skip comment - } - - parts := strings.SplitN(scanner.Text(), ":", 4) - - if len(parts) != 4 { - return nil, fmt.Errorf("bad entry: %q", scanner.Text()) - } - - name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3] - - gid, err := strconv.Atoi(sgid) - if err != nil { - return nil, fmt.Errorf("bad gid: %q", gid) - } - - members := strings.Split(smembers, ",") - - groups = append(groups, group{ - name: name, - gid: gid, - members: members, - }) - } - - if scanner.Err() != nil { - return nil, scanner.Err() - } - - return groups, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks.go b/vendor/github.com/containerd/continuity/hardlinks.go deleted file mode 100644 index 8b39bd061..000000000 --- a/vendor/github.com/containerd/continuity/hardlinks.go +++ /dev/null @@ -1,57 +0,0 @@ -package continuity - -import ( - "fmt" - "os" -) - -var ( - errNotAHardLink = fmt.Errorf("invalid hardlink") -) - -type hardlinkManager struct { - hardlinks map[hardlinkKey][]Resource -} - -func newHardlinkManager() *hardlinkManager { - return &hardlinkManager{ - hardlinks: map[hardlinkKey][]Resource{}, - } -} - -// Add attempts to add the resource to the hardlink manager. If the resource -// cannot be considered as a hardlink candidate, errNotAHardLink is returned. 
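> The deleted `parseGroups` above scans `/etc/group` line by line. For reference, a minimal sketch of parsing one entry of the form `name:password:gid:member1,member2`; `parseGroupLine` is an illustrative helper:

```go
package example

import (
	"fmt"
	"strconv"
	"strings"
)

// parseGroupLine splits a single /etc/group entry, the same shape the
// deleted parseGroups consumes via bufio.Scanner.
func parseGroupLine(line string) (name string, gid int, members []string, err error) {
	parts := strings.SplitN(line, ":", 4)
	if len(parts) != 4 {
		return "", 0, nil, fmt.Errorf("bad entry: %q", line)
	}
	gid, err = strconv.Atoi(parts[2])
	if err != nil {
		return "", 0, nil, fmt.Errorf("bad gid: %q", parts[2])
	}
	return parts[0], gid, strings.Split(parts[3], ","), nil
}
```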
-func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error { - if _, ok := resource.(Hardlinkable); !ok { - return errNotAHardLink - } - - key, err := newHardlinkKey(fi) - if err != nil { - return err - } - - hlm.hardlinks[key] = append(hlm.hardlinks[key], resource) - - return nil -} - -// Merge processes the current state of the hardlink manager and merges any -// shared nodes into hardlinked resources. -func (hlm *hardlinkManager) Merge() ([]Resource, error) { - var resources []Resource - for key, linked := range hlm.hardlinks { - if len(linked) < 1 { - return nil, fmt.Errorf("no hardlink entrys for dev, inode pair: %#v", key) - } - - merged, err := Merge(linked...) - if err != nil { - return nil, fmt.Errorf("error merging hardlink: %v", err) - } - - resources = append(resources, merged) - } - - return resources, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_unix.go b/vendor/github.com/containerd/continuity/hardlinks_unix.go deleted file mode 100644 index 1d81a3f96..000000000 --- a/vendor/github.com/containerd/continuity/hardlinks_unix.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build linux darwin freebsd solaris - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// hardlinkKey provides a tuple-key for managing hardlinks. This is system- -// specific. -type hardlinkKey struct { - dev uint64 - inode uint64 -} - -// newHardlinkKey returns a hardlink key for the provided file info. If the -// resource does not represent a possible hardlink, errNotAHardLink will be -// returned. -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo") - } - - if sys.Nlink < 2 { - // NOTE(stevvooe): This is not always true for all filesystems. We - // should somehow detect this and provided a slow "polyfill" that - // leverages os.SameFile if we detect a filesystem where link counts - // is not really supported. - return hardlinkKey{}, errNotAHardLink - } - - return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil -} diff --git a/vendor/github.com/containerd/continuity/hardlinks_windows.go b/vendor/github.com/containerd/continuity/hardlinks_windows.go deleted file mode 100644 index be516c560..000000000 --- a/vendor/github.com/containerd/continuity/hardlinks_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package continuity - -import "os" - -type hardlinkKey struct{} - -func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) { - // NOTE(stevvooe): Obviously, this is not yet implemented. However, the - // makings of an implementation are available in src/os/types_windows.go. More - // investigation needs to be done to figure out exactly how to do this. - return hardlinkKey{}, errNotAHardLink -} diff --git a/vendor/github.com/containerd/continuity/ioutils.go b/vendor/github.com/containerd/continuity/ioutils.go deleted file mode 100644 index 0e2cc5fbd..000000000 --- a/vendor/github.com/containerd/continuity/ioutils.go +++ /dev/null @@ -1,39 +0,0 @@ -package continuity - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// atomicWriteFile writes data to a file by first writing to a temp -// file and calling rename. 
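> The removed `hardlinkKey` above ties resources together by their `(device, inode)` pair from `syscall.Stat_t`. A sketch of grouping filesystem paths the same way, assuming a Unix-like platform where `fi.Sys()` yields `*syscall.Stat_t`; `linkGroups` is illustrative:

```go
// +build linux darwin freebsd solaris

package example

import (
	"os"
	"syscall"
)

// linkGroups buckets paths by (device, inode); paths sharing a key are
// hardlinks of one another. Files with Nlink < 2 are skipped, as in the
// deleted newHardlinkKey.
func linkGroups(paths []string) (map[[2]uint64][]string, error) {
	groups := map[[2]uint64][]string{}
	for _, p := range paths {
		fi, err := os.Lstat(p)
		if err != nil {
			return nil, err
		}
		st, ok := fi.Sys().(*syscall.Stat_t)
		if !ok || st.Nlink < 2 {
			continue // not a hardlink candidate
		}
		key := [2]uint64{uint64(st.Dev), uint64(st.Ino)}
		groups[key] = append(groups[key], p)
	}
	return groups, nil
}
```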
-func atomicWriteFile(filename string, r io.Reader, rf RegularFile) error { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return err - } - err = os.Chmod(f.Name(), rf.Mode()) - if err != nil { - f.Close() - return err - } - n, err := io.Copy(f, r) - if err == nil && n < rf.Size() { - f.Close() - return io.ErrShortWrite - } - if err != nil { - f.Close() - return err - } - if err := f.Sync(); err != nil { - f.Close() - return err - } - if err := f.Close(); err != nil { - return err - } - return os.Rename(f.Name(), filename) -} diff --git a/vendor/github.com/containerd/continuity/manifest.go b/vendor/github.com/containerd/continuity/manifest.go deleted file mode 100644 index 20706f359..000000000 --- a/vendor/github.com/containerd/continuity/manifest.go +++ /dev/null @@ -1,144 +0,0 @@ -package continuity - -import ( - "fmt" - "io" - "log" - "os" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/golang/protobuf/proto" -) - -// Manifest provides the contents of a manifest. Users of this struct should -// not typically modify any fields directly. -type Manifest struct { - // Resources specifies all the resources for a manifest in order by path. - Resources []Resource -} - -func Unmarshal(p []byte) (*Manifest, error) { - var bm pb.Manifest - - if err := proto.Unmarshal(p, &bm); err != nil { - return nil, err - } - - var m Manifest - for _, b := range bm.Resource { - r, err := fromProto(b) - if err != nil { - return nil, err - } - - m.Resources = append(m.Resources, r) - } - - return &m, nil -} - -func Marshal(m *Manifest) ([]byte, error) { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.Marshal(&bm) -} - -func MarshalText(w io.Writer, m *Manifest) error { - var bm pb.Manifest - for _, resource := range m.Resources { - bm.Resource = append(bm.Resource, toProto(resource)) - } - - return proto.MarshalText(w, &bm) -} - -// BuildManifest creates the manifest for the given context -func BuildManifest(ctx Context) (*Manifest, error) { - resourcesByPath := map[string]Resource{} - hardlinks := newHardlinkManager() - - if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("error walking %s: %v", p, err) - } - - if p == "/" { - // skip root - return nil - } - - resource, err := ctx.Resource(p, fi) - if err != nil { - if err == ErrNotFound { - return nil - } - log.Printf("error getting resource %q: %v", p, err) - return err - } - - // add to the hardlink manager - if err := hardlinks.Add(fi, resource); err == nil { - // Resource has been accepted by hardlink manager so we don't add - // it to the resourcesByPath until we merge at the end. - return nil - } else if err != errNotAHardLink { - // handle any other case where we have a proper error. - return fmt.Errorf("adding hardlink %s: %v", p, err) - } - - resourcesByPath[p] = resource - - return nil - }); err != nil { - return nil, err - } - - // merge and post-process the hardlinks. 
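> Stepping back to `atomicWriteFile` above: it relies on the temp-file-then-rename pattern, under which readers of the target path never observe a partial write because `rename(2)` replaces the target atomically on POSIX filesystems. A minimal sketch of the same pattern with a byte slice as input; `writeAtomically` is illustrative:

```go
package example

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeAtomically stages data in a temp file in the target directory (so the
// final rename stays on one filesystem), then renames it into place.
func writeAtomically(path string, data []byte, mode os.FileMode) error {
	tmp, err := ioutil.TempFile(filepath.Dir(path), ".tmp-"+filepath.Base(path))
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // cleanup on failure; harmless after rename
	if err := tmp.Chmod(mode); err != nil {
		tmp.Close()
		return err
	}
	if _, err := io.Copy(tmp, bytes.NewReader(data)); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // flush to disk before publishing
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}
```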
- hardlinked, err := hardlinks.Merge() - if err != nil { - return nil, err - } - - for _, resource := range hardlinked { - resourcesByPath[resource.Path()] = resource - } - - var resources []Resource - for _, resource := range resourcesByPath { - resources = append(resources, resource) - } - - sort.Stable(ByPath(resources)) - - return &Manifest{ - Resources: resources, - }, nil -} - -// VerifyManifest verifies all the resources in a manifest -// against files from the given context. -func VerifyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Verify(resource); err != nil { - return err - } - } - - return nil -} - -// ApplyManifest applies on the resources in a manifest to -// the given context. -func ApplyManifest(ctx Context, manifest *Manifest) error { - for _, resource := range manifest.Resources { - if err := ctx.Apply(resource); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containerd/continuity/manifest_test_darwin.go b/vendor/github.com/containerd/continuity/manifest_test_darwin.go deleted file mode 100644 index 873ec318e..000000000 --- a/vendor/github.com/containerd/continuity/manifest_test_darwin.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build ignore - -package continuity - -import "os" - -var ( - devNullResource = resource{ - kind: chardev, - path: "/dev/null", - major: 3, - minor: 2, - mode: 0666 | os.ModeDevice | os.ModeCharDevice, - } - - devZeroResource = resource{ - kind: chardev, - path: "/dev/zero", - major: 3, - minor: 3, - mode: 0666 | os.ModeDevice | os.ModeCharDevice, - } -) diff --git a/vendor/github.com/containerd/continuity/resource.go b/vendor/github.com/containerd/continuity/resource.go deleted file mode 100644 index 3643effb3..000000000 --- a/vendor/github.com/containerd/continuity/resource.go +++ /dev/null @@ -1,574 +0,0 @@ -package continuity - -import ( - "errors" - "fmt" - "os" - "reflect" - "sort" - - pb "github.com/containerd/continuity/proto" - "github.com/opencontainers/go-digest" -) - -// TODO(stevvooe): A record based model, somewhat sketched out at the bottom -// of this file, will be more flexible. Another possibly is to tie the package -// interface directly to the protobuf type. This will have efficiency -// advantages at the cost coupling the nasty codegen types to the exported -// interface. - -type Resource interface { - // Path provides the primary resource path relative to the bundle root. In - // cases where resources have more than one path, such as with hard links, - // this will return the primary path, which is often just the first entry. - Path() string - - // Mode returns the - Mode() os.FileMode - - UID() int64 - GID() int64 -} - -// ByPath provides the canonical sort order for a set of resources. Use with -// sort.Stable for deterministic sorting. -type ByPath []Resource - -func (bp ByPath) Len() int { return len(bp) } -func (bp ByPath) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] } -func (bp ByPath) Less(i, j int) bool { return bp[i].Path() < bp[j].Path() } - -type XAttrer interface { - XAttrs() map[string][]byte -} - -// Hardlinkable is an interface that a resource type satisfies if it can be a -// hardlink target. -type Hardlinkable interface { - // Paths returns all paths of the resource, including the primary path - // returned by Resource.Path. If len(Paths()) > 1, the resource is a hard - // link. 
- Paths() []string -} - -type RegularFile interface { - Resource - XAttrer - Hardlinkable - - Size() int64 - Digests() []digest.Digest -} - -// Merge two or more Resources into new file. Typically, this should be -// used to merge regular files as hardlinks. If the files are not identical, -// other than Paths and Digests, the merge will fail and an error will be -// returned. -func Merge(fs ...Resource) (Resource, error) { - if len(fs) < 1 { - return nil, fmt.Errorf("please provide a resource to merge") - } - - if len(fs) == 1 { - return fs[0], nil - } - - var paths []string - var digests []digest.Digest - bypath := map[string][]Resource{} - - // The attributes are all compared against the first to make sure they - // agree before adding to the above collections. If any of these don't - // correctly validate, the merge fails. - prototype := fs[0] - xattrs := make(map[string][]byte) - - // initialize xattrs for use below. All files must have same xattrs. - if prototypeXAttrer, ok := prototype.(XAttrer); ok { - for attr, value := range prototypeXAttrer.XAttrs() { - xattrs[attr] = value - } - } - - for _, f := range fs { - h, isHardlinkable := f.(Hardlinkable) - if !isHardlinkable { - return nil, errNotAHardLink - } - - if f.Mode() != prototype.Mode() { - return nil, fmt.Errorf("modes do not match: %v != %v", f.Mode(), prototype.Mode()) - } - - if f.UID() != prototype.UID() { - return nil, fmt.Errorf("uid does not match: %v != %v", f.UID(), prototype.UID()) - } - - if f.GID() != prototype.GID() { - return nil, fmt.Errorf("gid does not match: %v != %v", f.GID(), prototype.GID()) - } - - if xattrer, ok := f.(XAttrer); ok { - fxattrs := xattrer.XAttrs() - if !reflect.DeepEqual(fxattrs, xattrs) { - return nil, fmt.Errorf("resource %q xattrs do not match: %v != %v", f, fxattrs, xattrs) - } - } - - for _, p := range h.Paths() { - pfs, ok := bypath[p] - if !ok { - // ensure paths are unique by only appending on a new path. - paths = append(paths, p) - } - - bypath[p] = append(pfs, f) - } - - if regFile, isRegFile := f.(RegularFile); isRegFile { - prototypeRegFile, prototypeIsRegFile := prototype.(RegularFile) - if !prototypeIsRegFile { - return nil, errors.New("prototype is not a regular file") - } - - if regFile.Size() != prototypeRegFile.Size() { - return nil, fmt.Errorf("size does not match: %v != %v", regFile.Size(), prototypeRegFile.Size()) - } - - digests = append(digests, regFile.Digests()...) - } else if device, isDevice := f.(Device); isDevice { - prototypeDevice, prototypeIsDevice := prototype.(Device) - if !prototypeIsDevice { - return nil, errors.New("prototype is not a device") - } - - if device.Major() != prototypeDevice.Major() { - return nil, fmt.Errorf("major number does not match: %v != %v", device.Major(), prototypeDevice.Major()) - } - if device.Minor() != prototypeDevice.Minor() { - return nil, fmt.Errorf("minor number does not match: %v != %v", device.Minor(), prototypeDevice.Minor()) - } - } else if _, isNamedPipe := f.(NamedPipe); isNamedPipe { - _, prototypeIsNamedPipe := prototype.(NamedPipe) - if !prototypeIsNamedPipe { - return nil, errors.New("prototype is not a named pipe") - } - } else { - return nil, errNotAHardLink - } - } - - sort.Stable(sort.StringSlice(paths)) - - // Choose a "canonical" file. Really, it is just the first file to sort - // against. We also effectively select the very first digest as the - // "canonical" one for this file. 
- first := bypath[paths[0]][0] - - resource := resource{ - paths: paths, - mode: first.Mode(), - uid: first.UID(), - gid: first.GID(), - xattrs: xattrs, - } - - switch typedF := first.(type) { - case RegularFile: - var err error - digests, err = uniqifyDigests(digests...) - if err != nil { - return nil, err - } - - return ®ularFile{ - resource: resource, - size: typedF.Size(), - digests: digests, - }, nil - case Device: - return &device{ - resource: resource, - major: typedF.Major(), - minor: typedF.Minor(), - }, nil - - case NamedPipe: - return &namedPipe{ - resource: resource, - }, nil - - default: - return nil, errNotAHardLink - } -} - -type Directory interface { - Resource - XAttrer - - // Directory is a no-op method to identify directory objects by interface. - Directory() -} - -type SymLink interface { - Resource - - // Target returns the target of the symlink contained in the . - Target() string -} - -type NamedPipe interface { - Resource - Hardlinkable - XAttrer - - // Pipe is a no-op method to allow consistent resolution of NamedPipe - // interface. - Pipe() -} - -type Device interface { - Resource - Hardlinkable - XAttrer - - Major() uint64 - Minor() uint64 -} - -type resource struct { - paths []string - mode os.FileMode - uid, gid int64 - xattrs map[string][]byte -} - -var _ Resource = &resource{} - -func (r *resource) Path() string { - if len(r.paths) < 1 { - return "" - } - - return r.paths[0] -} - -func (r *resource) Mode() os.FileMode { - return r.mode -} - -func (r *resource) UID() int64 { - return r.uid -} - -func (r *resource) GID() int64 { - return r.gid -} - -type regularFile struct { - resource - size int64 - digests []digest.Digest -} - -var _ RegularFile = ®ularFile{} - -// newRegularFile returns the RegularFile, using the populated base resource -// and one or more digests of the content. -func newRegularFile(base resource, paths []string, size int64, dgsts ...digest.Digest) (RegularFile, error) { - if !base.Mode().IsRegular() { - return nil, fmt.Errorf("not a regular file") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - // make our own copy of digests - ds := make([]digest.Digest, len(dgsts)) - copy(ds, dgsts) - - return ®ularFile{ - resource: base, - size: size, - digests: ds, - }, nil -} - -func (rf *regularFile) Paths() []string { - paths := make([]string, len(rf.paths)) - copy(paths, rf.paths) - return paths -} - -func (rf *regularFile) Size() int64 { - return rf.size -} - -func (rf *regularFile) Digests() []digest.Digest { - digests := make([]digest.Digest, len(rf.digests)) - copy(digests, rf.digests) - return digests -} - -func (rf *regularFile) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(rf.xattrs)) - - for attr, value := range rf.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type directory struct { - resource -} - -var _ Directory = &directory{} - -func newDirectory(base resource) (Directory, error) { - if !base.Mode().IsDir() { - return nil, fmt.Errorf("not a directory") - } - - return &directory{ - resource: base, - }, nil -} - -func (d *directory) Directory() {} - -func (d *directory) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) 
- } - - return xattrs -} - -type symLink struct { - resource - target string -} - -var _ SymLink = &symLink{} - -func newSymLink(base resource, target string) (SymLink, error) { - if base.Mode()&os.ModeSymlink == 0 { - return nil, fmt.Errorf("not a symlink") - } - - return &symLink{ - resource: base, - target: target, - }, nil -} - -func (l *symLink) Target() string { - return l.target -} - -type namedPipe struct { - resource -} - -var _ NamedPipe = &namedPipe{} - -func newNamedPipe(base resource, paths []string) (NamedPipe, error) { - if base.Mode()&os.ModeNamedPipe == 0 { - return nil, fmt.Errorf("not a namedpipe") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &namedPipe{ - resource: base, - }, nil -} - -func (np *namedPipe) Pipe() {} - -func (np *namedPipe) Paths() []string { - paths := make([]string, len(np.paths)) - copy(paths, np.paths) - return paths -} - -func (np *namedPipe) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(np.xattrs)) - - for attr, value := range np.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -type device struct { - resource - major, minor uint64 -} - -var _ Device = &device{} - -func newDevice(base resource, paths []string, major, minor uint64) (Device, error) { - if base.Mode()&os.ModeDevice == 0 { - return nil, fmt.Errorf("not a device") - } - - base.paths = make([]string, len(paths)) - copy(base.paths, paths) - - return &device{ - resource: base, - major: major, - minor: minor, - }, nil -} - -func (d *device) Paths() []string { - paths := make([]string, len(d.paths)) - copy(paths, d.paths) - return paths -} - -func (d *device) XAttrs() map[string][]byte { - xattrs := make(map[string][]byte, len(d.xattrs)) - - for attr, value := range d.xattrs { - xattrs[attr] = append(xattrs[attr], value...) - } - - return xattrs -} - -func (d device) Major() uint64 { - return d.major -} - -func (d device) Minor() uint64 { - return d.minor -} - -// toProto converts a resource to a protobuf record. We'd like to push this -// the individual types but we want to keep this all together during -// prototyping. -func toProto(resource Resource) *pb.Resource { - b := &pb.Resource{ - Path: []string{resource.Path()}, - Mode: uint32(resource.Mode()), - Uid: resource.UID(), - Gid: resource.GID(), - } - - if xattrer, ok := resource.(XAttrer); ok { - // Sorts the XAttrs by name for consistent ordering. - keys := []string{} - xattrs := xattrer.XAttrs() - for k := range xattrs { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - b.Xattr = append(b.Xattr, &pb.XAttr{Name: k, Data: xattrs[k]}) - } - } - - switch r := resource.(type) { - case RegularFile: - b.Path = r.Paths() - b.Size = uint64(r.Size()) - - for _, dgst := range r.Digests() { - b.Digest = append(b.Digest, dgst.String()) - } - case SymLink: - b.Target = r.Target() - case Device: - b.Major, b.Minor = r.Major(), r.Minor() - b.Path = r.Paths() - case NamedPipe: - b.Path = r.Paths() - } - - // enforce a few stability guarantees that may not be provided by the - // resource implementation. - sort.Strings(b.Path) - - return b -} - -// fromProto converts from a protobuf Resource to a Resource interface. 
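> Each of the removed `XAttrs` methods above repeats the same defensive deep copy of the xattr map, so callers cannot mutate a resource's internal state. The pattern in isolation:

```go
package example

// copyXAttrs deep-copies an xattr map: the map itself and every value slice
// are fresh allocations, matching the per-type XAttrs methods above.
func copyXAttrs(in map[string][]byte) map[string][]byte {
	out := make(map[string][]byte, len(in))
	for attr, value := range in {
		out[attr] = append([]byte(nil), value...)
	}
	return out
}
```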
-func fromProto(b *pb.Resource) (Resource, error) { - base := &resource{ - paths: b.Path, - mode: os.FileMode(b.Mode), - uid: b.Uid, - gid: b.Gid, - } - - base.xattrs = make(map[string][]byte, len(b.Xattr)) - - for _, attr := range b.Xattr { - base.xattrs[attr.Name] = attr.Data - } - - switch { - case base.Mode().IsRegular(): - dgsts := make([]digest.Digest, len(b.Digest)) - for i, dgst := range b.Digest { - // TODO(stevvooe): Should we be validating at this point? - dgsts[i] = digest.Digest(dgst) - } - - return newRegularFile(*base, b.Path, int64(b.Size), dgsts...) - case base.Mode().IsDir(): - return newDirectory(*base) - case base.Mode()&os.ModeSymlink != 0: - return newSymLink(*base, b.Target) - case base.Mode()&os.ModeNamedPipe != 0: - return newNamedPipe(*base, b.Path) - case base.Mode()&os.ModeDevice != 0: - return newDevice(*base, b.Path, b.Major, b.Minor) - } - - return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode()) -} - -// NOTE(stevvooe): An alternative model that supports inline declaration. -// Convenient for unit testing where inline declarations may be desirable but -// creates an awkward API for the standard use case. - -// type ResourceKind int - -// const ( -// ResourceRegularFile = iota + 1 -// ResourceDirectory -// ResourceSymLink -// Resource -// ) - -// type Resource struct { -// Kind ResourceKind -// Paths []string -// Mode os.FileMode -// UID string -// GID string -// Size int64 -// Digests []digest.Digest -// Target string -// Major, Minor int -// XAttrs map[string][]byte -// } - -// type RegularFile struct { -// Paths []string -// Size int64 -// Digests []digest.Digest -// Perm os.FileMode // os.ModePerm + sticky, setuid, setgid -// } diff --git a/vendor/github.com/containerd/continuity/resource_unix.go b/vendor/github.com/containerd/continuity/resource_unix.go deleted file mode 100644 index 4144643e0..000000000 --- a/vendor/github.com/containerd/continuity/resource_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build linux darwin freebsd solaris - -package continuity - -import ( - "fmt" - "os" - "syscall" -) - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - // TODO(stevvooe): This need to be resolved for the container's root, - // where here we are really getting the host OS's value. We need to allow - // this be passed in and fixed up to make these uid/gid mappings portable. - // Either this can be part of the driver or we can achieve it through some - // other mechanism. - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - // TODO(stevvooe): This may not be a hard error for all platforms. We - // may want to move this to the driver. - return nil, fmt.Errorf("unable to resolve syscall.Stat_t from (os.FileInfo).Sys(): %#v", fi) - } - - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - - uid: int64(sys.Uid), - gid: int64(sys.Gid), - - // NOTE(stevvooe): Population of shared xattrs field is deferred to - // the resource types that populate it. Since they are a property of - // the context, they must set there. 
- }, nil -} diff --git a/vendor/github.com/containerd/continuity/resource_windows.go b/vendor/github.com/containerd/continuity/resource_windows.go deleted file mode 100644 index 7b44414ac..000000000 --- a/vendor/github.com/containerd/continuity/resource_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package continuity - -import "os" - -// newBaseResource returns a *resource, populated with data from p and fi, -// where p will be populated directly. -func newBaseResource(p string, fi os.FileInfo) (*resource, error) { - return &resource{ - paths: []string{p}, - mode: fi.Mode(), - }, nil -} diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go deleted file mode 100644 index 72741a107..000000000 --- a/vendor/github.com/coreos/etcd/auth/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package auth provides client role authentication for accessing keys in etcd. -package auth diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go deleted file mode 100644 index 214ae48c8..000000000 --- a/vendor/github.com/coreos/etcd/auth/jwt.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
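> The Unix `newBaseResource` above recovers ownership by casting `fi.Sys()` to `*syscall.Stat_t`. That cast in isolation, for Unix-like platforms; `ownership` is an illustrative helper:

```go
// +build linux darwin freebsd solaris

package example

import (
	"fmt"
	"os"
	"syscall"
)

// ownership extracts uid/gid from a FileInfo. On other platforms fi.Sys()
// has a different concrete type, which is why the Windows variant above
// leaves uid/gid at their zero values.
func ownership(fi os.FileInfo) (uid, gid int64, err error) {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, 0, fmt.Errorf("no *syscall.Stat_t in %T", fi.Sys())
	}
	return int64(st.Uid), int64(st.Gid), nil
}
```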
- -package auth - -import ( - "crypto/rsa" - "io/ioutil" - - jwt "github.com/dgrijalva/jwt-go" - "golang.org/x/net/context" -) - -type tokenJWT struct { - signMethod string - signKey *rsa.PrivateKey - verifyKey *rsa.PublicKey -} - -func (t *tokenJWT) enable() {} -func (t *tokenJWT) disable() {} -func (t *tokenJWT) invalidateUser(string) {} -func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } - -func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { - // rev isn't used in JWT, it is only used in simple token - var ( - username string - revision uint64 - ) - - parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - return t.verifyKey, nil - }) - - switch err.(type) { - case nil: - if !parsed.Valid { - plog.Warningf("invalid jwt token: %s", token) - return nil, false - } - - claims := parsed.Claims.(jwt.MapClaims) - - username = claims["username"].(string) - revision = uint64(claims["revision"].(float64)) - default: - plog.Warningf("failed to parse jwt token: %s", err) - return nil, false - } - - return &AuthInfo{Username: username, Revision: revision}, true -} - -func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { - // Future work: let a jwt token include permission information would be useful for - // permission checking in proxy side. - tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), - jwt.MapClaims{ - "username": username, - "revision": revision, - }) - - token, err := tk.SignedString(t.signKey) - if err != nil { - plog.Debugf("failed to sign jwt token: %s", err) - return "", err - } - - plog.Debugf("jwt token: %s", token) - - return token, err -} - -func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { - for k, v := range opts { - switch k { - case "sign-method": - jwtSignMethod = v - case "pub-key": - jwtPubKeyPath = v - case "priv-key": - jwtPrivKeyPath = v - default: - plog.Errorf("unknown token specific option: %s", k) - return "", "", "", ErrInvalidAuthOpts - } - } - - return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil -} - -func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { - jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - t := &tokenJWT{} - - t.signMethod = jwtSignMethod - - verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) - if err != nil { - plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) - return nil, err - } - t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) - if err != nil { - plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) - return nil, err - } - - signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) - if err != nil { - plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) - return nil, err - } - t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - if err != nil { - plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) - return nil, err - } - - return t, nil -} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go deleted file mode 100644 index 691b65ba3..000000000 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except 
in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "github.com/coreos/etcd/auth/authpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/adt" -) - -func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { - user := getUser(tx, userName) - if user == nil { - plog.Errorf("invalid user name %s", userName) - return nil - } - - readPerms := &adt.IntervalTree{} - writePerms := &adt.IntervalTree{} - - for _, roleName := range user.Roles { - role := getRole(tx, roleName) - if role == nil { - continue - } - - for _, perm := range role.KeyPermission { - var ivl adt.Interval - var rangeEnd []byte - - if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { - rangeEnd = perm.RangeEnd - } - - if len(perm.RangeEnd) != 0 { - ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) - } else { - ivl = adt.NewBytesAffinePoint(perm.Key) - } - - switch perm.PermType { - case authpb.READWRITE: - readPerms.Insert(ivl, struct{}{}) - writePerms.Insert(ivl, struct{}{}) - - case authpb.READ: - readPerms.Insert(ivl, struct{}{}) - - case authpb.WRITE: - writePerms.Insert(ivl, struct{}{}) - } - } - } - - return &unifiedRangePermissions{ - readPerms: readPerms, - writePerms: writePerms, - } -} - -func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - if len(rangeEnd) == 1 && rangeEnd[0] == 0 { - rangeEnd = nil - } - - ivl := adt.NewBytesAffineInterval(key, rangeEnd) - switch permtyp { - case authpb.READ: - return cachedPerms.readPerms.Contains(ivl) - case authpb.WRITE: - return cachedPerms.writePerms.Contains(ivl) - default: - plog.Panicf("unknown auth type: %v", permtyp) - } - return false -} - -func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { - pt := adt.NewBytesAffinePoint(key) - switch permtyp { - case authpb.READ: - return cachedPerms.readPerms.Intersects(pt) - case authpb.WRITE: - return cachedPerms.writePerms.Intersects(pt) - default: - plog.Panicf("unknown auth type: %v", permtyp) - } - return false -} - -func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - // assumption: tx is Lock()ed - _, ok := as.rangePermCache[userName] - if !ok { - perms := getMergedPerms(tx, userName) - if perms == nil { - plog.Errorf("failed to create a unified permission of user %s", userName) - return false - } - as.rangePermCache[userName] = perms - } - - if len(rangeEnd) == 0 { - return checkKeyPoint(as.rangePermCache[userName], key, permtyp) - } - - return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) -} - -func (as *authStore) clearCachedPerm() { - as.rangePermCache = make(map[string]*unifiedRangePermissions) -} - -func (as *authStore) invalidateCachedPerm(userName string) { - delete(as.rangePermCache, userName) -} - -type unifiedRangePermissions struct { - readPerms *adt.IntervalTree - writePerms *adt.IntervalTree -} diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go 
b/vendor/github.com/coreos/etcd/auth/simple_token.go deleted file mode 100644 index 94d92a115..000000000 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -// CAUTION: This randum number based token mechanism is only for testing purpose. -// JWT based mechanism will be added in the near future. - -import ( - "crypto/rand" - "fmt" - "math/big" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/context" -) - -const ( - letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - defaultSimpleTokenLength = 16 -) - -// var for testing purposes -var ( - simpleTokenTTL = 5 * time.Minute - simpleTokenTTLResolution = 1 * time.Second -) - -type simpleTokenTTLKeeper struct { - tokens map[string]time.Time - donec chan struct{} - stopc chan struct{} - deleteTokenFunc func(string) - mu *sync.Mutex -} - -func (tm *simpleTokenTTLKeeper) stop() { - select { - case tm.stopc <- struct{}{}: - case <-tm.donec: - } - <-tm.donec -} - -func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) { - tm.tokens[token] = time.Now().Add(simpleTokenTTL) -} - -func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) { - if _, ok := tm.tokens[token]; ok { - tm.tokens[token] = time.Now().Add(simpleTokenTTL) - } -} - -func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) { - delete(tm.tokens, token) -} - -func (tm *simpleTokenTTLKeeper) run() { - tokenTicker := time.NewTicker(simpleTokenTTLResolution) - defer func() { - tokenTicker.Stop() - close(tm.donec) - }() - for { - select { - case <-tokenTicker.C: - nowtime := time.Now() - tm.mu.Lock() - for t, tokenendtime := range tm.tokens { - if nowtime.After(tokenendtime) { - tm.deleteTokenFunc(t) - delete(tm.tokens, t) - } - } - tm.mu.Unlock() - case <-tm.stopc: - return - } - } -} - -type tokenSimple struct { - indexWaiter func(uint64) <-chan struct{} - simpleTokenKeeper *simpleTokenTTLKeeper - simpleTokensMu sync.Mutex - simpleTokens map[string]string // token -> username -} - -func (t *tokenSimple) genTokenPrefix() (string, error) { - ret := make([]byte, defaultSimpleTokenLength) - - for i := 0; i < defaultSimpleTokenLength; i++ { - bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) - if err != nil { - return "", err - } - - ret[i] = letters[bInt.Int64()] - } - - return string(ret), nil -} - -func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { - t.simpleTokensMu.Lock() - _, ok := t.simpleTokens[token] - if ok { - plog.Panicf("token %s is alredy used", token) - } - - t.simpleTokens[token] = username - t.simpleTokenKeeper.addSimpleToken(token) - t.simpleTokensMu.Unlock() -} - -func (t *tokenSimple) invalidateUser(username string) { - if t.simpleTokenKeeper == nil { - return - } - t.simpleTokensMu.Lock() - for token, name := range t.simpleTokens { - if strings.Compare(name, username) == 0 { - delete(t.simpleTokens, token) - 
t.simpleTokenKeeper.deleteSimpleToken(token) - } - } - t.simpleTokensMu.Unlock() -} - -func (t *tokenSimple) enable() { - delf := func(tk string) { - if username, ok := t.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) - delete(t.simpleTokens, tk) - } - } - t.simpleTokenKeeper = &simpleTokenTTLKeeper{ - tokens: make(map[string]time.Time), - donec: make(chan struct{}), - stopc: make(chan struct{}), - deleteTokenFunc: delf, - mu: &t.simpleTokensMu, - } - go t.simpleTokenKeeper.run() -} - -func (t *tokenSimple) disable() { - t.simpleTokensMu.Lock() - tk := t.simpleTokenKeeper - t.simpleTokenKeeper = nil - t.simpleTokens = make(map[string]string) // invalidate all tokens - t.simpleTokensMu.Unlock() - if tk != nil { - tk.stop() - } -} - -func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) { - if !t.isValidSimpleToken(ctx, token) { - return nil, false - } - t.simpleTokensMu.Lock() - username, ok := t.simpleTokens[token] - if ok && t.simpleTokenKeeper != nil { - t.simpleTokenKeeper.resetSimpleToken(token) - } - t.simpleTokensMu.Unlock() - return &AuthInfo{Username: username, Revision: revision}, ok -} - -func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { - // rev isn't used in simple token, it is only used in JWT - index := ctx.Value("index").(uint64) - simpleToken := ctx.Value("simpleToken").(string) - token := fmt.Sprintf("%s.%d", simpleToken, index) - t.assignSimpleTokenToUser(username, token) - - return token, nil -} - -func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { - splitted := strings.Split(token, ".") - if len(splitted) != 2 { - return false - } - index, err := strconv.Atoi(splitted[1]) - if err != nil { - return false - } - - select { - case <-t.indexWaiter(uint64(index)): - return true - case <-ctx.Done(): - } - - return false -} - -func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { - return &tokenSimple{ - simpleTokens: make(map[string]string), - indexWaiter: indexWaiter, - } -} diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go deleted file mode 100644 index 20b57f284..000000000 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ /dev/null @@ -1,1059 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
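> The removed `tokenJWT` provider above signs a `(username, revision)` claim pair with RS256 and recovers it on parse. A minimal sketch of that round trip with the same `dgrijalva/jwt-go` package; key loading (`ParseRSAPrivateKeyFromPEM` and friends, as shown above) is omitted for brevity:

```go
package example

import (
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// mintToken signs the claim shape tokenJWT.assign produces.
func mintToken(key *rsa.PrivateKey, username string, revision uint64) (string, error) {
	tk := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
		"username": username,
		"revision": revision,
	})
	return tk.SignedString(key)
}

// readToken verifies a token and recovers the claims, mirroring tokenJWT.info.
func readToken(pub *rsa.PublicKey, token string) (string, uint64, error) {
	parsed, err := jwt.Parse(token, func(*jwt.Token) (interface{}, error) {
		return pub, nil
	})
	if err != nil || !parsed.Valid {
		return "", 0, fmt.Errorf("invalid jwt token: %v", err)
	}
	claims := parsed.Claims.(jwt.MapClaims)
	// JSON numbers decode as float64, hence the same conversion seen above.
	return claims["username"].(string), uint64(claims["revision"].(float64)), nil
}
```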
- -package auth - -import ( - "bytes" - "encoding/binary" - "errors" - "sort" - "strings" - "sync" - "sync/atomic" - - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/pkg/capnslog" - "golang.org/x/crypto/bcrypt" - "golang.org/x/net/context" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -var ( - enableFlagKey = []byte("authEnabled") - authEnabled = []byte{1} - authDisabled = []byte{0} - - revisionKey = []byte("authRevision") - - authBucketName = []byte("auth") - authUsersBucketName = []byte("authUsers") - authRolesBucketName = []byte("authRoles") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth") - - ErrRootUserNotExist = errors.New("auth: root user does not exist") - ErrRootRoleNotExist = errors.New("auth: root user does not have root role") - ErrUserAlreadyExist = errors.New("auth: user already exists") - ErrUserEmpty = errors.New("auth: user name is empty") - ErrUserNotFound = errors.New("auth: user not found") - ErrRoleAlreadyExist = errors.New("auth: role already exists") - ErrRoleNotFound = errors.New("auth: role not found") - ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") - ErrPermissionDenied = errors.New("auth: permission denied") - ErrRoleNotGranted = errors.New("auth: role is not granted to the user") - ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role") - ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") - ErrAuthOldRevision = errors.New("auth: revision in header is old") - ErrInvalidAuthToken = errors.New("auth: invalid auth token") - ErrInvalidAuthOpts = errors.New("auth: invalid auth options") - ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") - - // BcryptCost is the algorithm cost / strength for hashing auth passwords - BcryptCost = bcrypt.DefaultCost -) - -const ( - rootUser = "root" - rootRole = "root" - - revBytesLen = 8 -) - -type AuthInfo struct { - Username string - Revision uint64 -} - -type AuthStore interface { - // AuthEnable turns on the authentication feature - AuthEnable() error - - // AuthDisable turns off the authentication feature - AuthDisable() - - // Authenticate does authentication based on given user name and password - Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) - - // Recover recovers the state of auth store from the given backend - Recover(b backend.Backend) - - // UserAdd adds a new user - UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - - // UserDelete deletes a user - UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user - UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to the user - UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - - // UserGet gets the detailed information of a users - UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - - // UserRevokeRole revokes a role of a user - UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role - RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role - RoleGrantPermission(r 
*pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - - // RoleGet gets the detailed information of a role - RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - - // RoleRevokePermission gets the detailed information of a role - RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - - // RoleDelete gets the detailed information of a role - RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - - // UserList gets a list of all users - UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - - // RoleList gets a list of all roles - RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - - // IsPutPermitted checks put permission of the user - IsPutPermitted(authInfo *AuthInfo, key []byte) error - - // IsRangePermitted checks range permission of the user - IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsDeleteRangePermitted checks delete-range permission of the user - IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsAdminPermitted checks admin permission of the user - IsAdminPermitted(authInfo *AuthInfo) error - - // GenTokenPrefix produces a random string in a case of simple token - // in a case of JWT, it produces an empty string - GenTokenPrefix() (string, error) - - // Revision gets current revision of authStore - Revision() uint64 - - // CheckPassword checks a given pair of username and password is correct - CheckPassword(username, password string) (uint64, error) - - // Close does cleanup of AuthStore - Close() error - - // AuthInfoFromCtx gets AuthInfo from gRPC's context - AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) - - // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context - AuthInfoFromTLS(ctx context.Context) *AuthInfo -} - -type TokenProvider interface { - info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) - assign(ctx context.Context, username string, revision uint64) (string, error) - enable() - disable() - - invalidateUser(string) - genTokenPrefix() (string, error) -} - -type authStore struct { - // atomic operations; need 64-bit align, or 32-bit tests will crash - revision uint64 - - be backend.Backend - enabled bool - enabledMu sync.RWMutex - - rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - - tokenProvider TokenProvider -} - -func (as *authStore) AuthEnable() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if as.enabled { - plog.Noticef("Authentication already enabled") - return nil - } - b := as.be - tx := b.BatchTx() - tx.Lock() - defer func() { - tx.Unlock() - b.ForceCommit() - }() - - u := getUser(tx, rootUser) - if u == nil { - return ErrRootUserNotExist - } - - if !hasRootRole(u) { - return ErrRootRoleNotExist - } - - tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) - - as.enabled = true - as.tokenProvider.enable() - - as.rangePermCache = make(map[string]*unifiedRangePermissions) - - as.setRevision(getRevision(tx)) - - plog.Noticef("Authentication enabled") - - return nil -} - -func (as *authStore) AuthDisable() { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return - } - b := as.be - tx := b.BatchTx() - tx.Lock() - tx.UnsafePut(authBucketName, enableFlagKey, authDisabled) - as.commitRevision(tx) - tx.Unlock() - b.ForceCommit() - - as.enabled = false - as.tokenProvider.disable() - - plog.Noticef("Authentication disabled") -} - 
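> `AuthEnable`/`AuthDisable` above guard the `enabled` flag with an RWMutex: writers take the exclusive lock to flip it, while hot-path checks only need the read lock. The locking pattern in isolation, assuming the illustrative name `enableGate`:

```go
package example

import "sync"

// enableGate mirrors the authStore enabled/enabledMu pair above.
type enableGate struct {
	mu      sync.RWMutex
	enabled bool
}

func (g *enableGate) set(v bool) {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.enabled = v
}

// isEnabled is cheap enough for per-request permission checks, since
// concurrent readers do not block one another.
func (g *enableGate) isEnabled() bool {
	g.mu.RLock()
	defer g.mu.RUnlock()
	return g.enabled
}
```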
-func (as *authStore) Close() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return nil - } - as.tokenProvider.disable() - return nil -} - -func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { - if !as.isAuthEnabled() { - return nil, ErrAuthNotEnabled - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, username) - if user == nil { - return nil, ErrAuthFailed - } - - // Password checking is already performed in the API layer, so we don't need to check for now. - // Staleness of password can be detected with OCC in the API layer, too. - - token, err := as.tokenProvider.assign(ctx, username, as.Revision()) - if err != nil { - return nil, err - } - - plog.Debugf("authorized %s, token is %s", username, token) - return &pb.AuthenticateResponse{Token: token}, nil -} - -func (as *authStore) CheckPassword(username, password string) (uint64, error) { - if !as.isAuthEnabled() { - return 0, ErrAuthNotEnabled - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, username) - if user == nil { - return 0, ErrAuthFailed - } - - if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { - plog.Noticef("authentication failed, invalid password for user %s", username) - return 0, ErrAuthFailed - } - - return getRevision(tx), nil -} - -func (as *authStore) Recover(be backend.Backend) { - enabled := false - as.be = be - tx := be.BatchTx() - tx.Lock() - _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - - as.setRevision(getRevision(tx)) - - tx.Unlock() - - as.enabledMu.Lock() - as.enabled = enabled - as.enabledMu.Unlock() -} - -func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - if len(r.Name) == 0 { - return nil, ErrUserEmpty - } - - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) - if err != nil { - plog.Errorf("failed to hash password: %s", err) - return nil, err - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user != nil { - return nil, ErrUserAlreadyExist - } - - newUser := &authpb.User{ - Name: []byte(r.Name), - Password: hashed, - } - - putUser(tx, newUser) - - as.commitRevision(tx) - - plog.Noticef("added a new user: %s", r.Name) - - return &pb.AuthUserAddResponse{}, nil -} - -func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 { - plog.Errorf("the user root must not be deleted") - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - delUser(tx, r.Name) - - as.commitRevision(tx) - - as.invalidateCachedPerm(r.Name) - as.tokenProvider.invalidateUser(r.Name) - - plog.Noticef("deleted a user: %s", r.Name) - - return &pb.AuthUserDeleteResponse{}, nil -} - -func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword() - // If the cost is too high, we should move the encryption to outside of the raft - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) - if err != nil { - plog.Errorf("failed to hash password: %s", err) - return nil, err - } - 
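> `UserAdd`, `UserChangePassword`, and `CheckPassword` above are built on bcrypt. A minimal round trip with the same `golang.org/x/crypto/bcrypt` package: `GenerateFromPassword` salts and hashes, and `CompareHashAndPassword` returns nil only on a match:

```go
package example

import "golang.org/x/crypto/bcrypt"

// hashAndCheck hashes a password and verifies it against the stored hash,
// as the deleted user-management methods above do with a configurable cost.
func hashAndCheck(password string) (bool, error) {
	hashed, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return false, err
	}
	return bcrypt.CompareHashAndPassword(hashed, []byte(password)) == nil, nil
}
```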
- tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - updatedUser := &authpb.User{ - Name: []byte(r.Name), - Roles: user.Roles, - Password: hashed, - } - - putUser(tx, updatedUser) - - as.commitRevision(tx) - - as.invalidateCachedPerm(r.Name) - as.tokenProvider.invalidateUser(r.Name) - - plog.Noticef("changed a password of a user: %s", r.Name) - - return &pb.AuthUserChangePasswordResponse{}, nil -} - -func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.User) - if user == nil { - return nil, ErrUserNotFound - } - - if r.Role != rootRole { - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - } - - idx := sort.SearchStrings(user.Roles, r.Role) - if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 { - plog.Warningf("user %s is already granted role %s", r.User, r.Role) - return &pb.AuthUserGrantRoleResponse{}, nil - } - - user.Roles = append(user.Roles, r.Role) - sort.Sort(sort.StringSlice(user.Roles)) - - putUser(tx, user) - - as.invalidateCachedPerm(r.User) - - as.commitRevision(tx) - - plog.Noticef("granted role %s to user %s", r.Role, r.User) - return &pb.AuthUserGrantRoleResponse{}, nil -} - -func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthUserGetResponse - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - resp.Roles = append(resp.Roles, user.Roles...) - return &resp, nil -} - -func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthUserListResponse - - users := getAllUsers(tx) - - for _, u := range users { - resp.Users = append(resp.Users, string(u.Name)) - } - - return &resp, nil -} - -func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be revoked from the user root") - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - } - - for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { - updatedUser.Roles = append(updatedUser.Roles, role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - return nil, ErrRoleNotGranted - } - - putUser(tx, updatedUser) - - as.invalidateCachedPerm(r.Name) - - as.commitRevision(tx) - - plog.Noticef("revoked role %s from user %s", r.Role, r.Name) - return &pb.AuthUserRevokeRoleResponse{}, nil -} - -func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthRoleGetResponse - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - resp.Perm = append(resp.Perm, role.KeyPermission...) 
- return &resp, nil -} - -func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthRoleListResponse - - roles := getAllRoles(tx) - - for _, r := range roles { - resp.Roles = append(resp.Roles, string(r.Name)) - } - - return &resp, nil -} - -func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - updatedRole := &authpb.Role{ - Name: role.Name, - } - - for _, perm := range role.KeyPermission { - if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) { - updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) - } - } - - if len(role.KeyPermission) == len(updatedRole.KeyPermission) { - return nil, ErrPermissionNotGranted - } - - putRole(tx, updatedRole) - - // TODO(mitake): currently single role update invalidates every cache - // It should be optimized. - as.clearCachedPerm() - - as.commitRevision(tx) - - plog.Noticef("revoked key %s from role %s", r.Key, r.Role) - return &pb.AuthRoleRevokePermissionResponse{}, nil -} - -func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - if as.enabled && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be deleted") - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - delRole(tx, r.Role) - - users := getAllUsers(tx) - for _, user := range users { - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - } - - for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { - updatedUser.Roles = append(updatedUser.Roles, role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - continue - } - - putUser(tx, updatedUser) - - as.invalidateCachedPerm(string(user.Name)) - } - - as.commitRevision(tx) - - plog.Noticef("deleted role %s", r.Role) - return &pb.AuthRoleDeleteResponse{}, nil -} - -func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Name) - if role != nil { - return nil, ErrRoleAlreadyExist - } - - newRole := &authpb.Role{ - Name: []byte(r.Name), - } - - putRole(tx, newRole) - - as.commitRevision(tx) - - plog.Noticef("Role %s is created", r.Name) - - return &pb.AuthRoleAddResponse{}, nil -} - -func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { - return as.tokenProvider.info(ctx, token, as.Revision()) -} - -type permSlice []*authpb.Permission - -func (perms permSlice) Len() int { - return len(perms) -} - -func (perms permSlice) Less(i, j int) bool { - return bytes.Compare(perms[i].Key, perms[j].Key) < 0 -} - -func (perms permSlice) Swap(i, j int) { - perms[i], perms[j] = perms[j], perms[i] -} - -func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Name) - if role == nil { - return nil, ErrRoleNotFound - } - - idx := sort.Search(len(role.KeyPermission), func(i int) bool { - return bytes.Compare(role.KeyPermission[i].Key, 
[]byte(r.Perm.Key)) >= 0 - }) - - if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { - // update existing permission - role.KeyPermission[idx].PermType = r.Perm.PermType - } else { - // append new permission to the role - newPerm := &authpb.Permission{ - Key: []byte(r.Perm.Key), - RangeEnd: []byte(r.Perm.RangeEnd), - PermType: r.Perm.PermType, - } - - role.KeyPermission = append(role.KeyPermission, newPerm) - sort.Sort(permSlice(role.KeyPermission)) - } - - putRole(tx, role) - - // TODO(mitake): currently single role update invalidates every cache - // It should be optimized. - as.clearCachedPerm() - - as.commitRevision(tx) - - plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) - - return &pb.AuthRoleGrantPermissionResponse{}, nil -} - -func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { - // TODO(mitake): this function would be costly so we need a caching mechanism - if !as.isAuthEnabled() { - return nil - } - - // only gets rev == 0 when passed AuthInfo{}; no user given - if revision == 0 { - return ErrUserEmpty - } - - if revision < as.Revision() { - return ErrAuthOldRevision - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, userName) - if user == nil { - plog.Errorf("invalid user name %s for permission checking", userName) - return ErrPermissionDenied - } - - // root role should have permission on all ranges - if hasRootRole(user) { - return nil - } - - if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) { - return nil - } - - return ErrPermissionDenied -} - -func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE) -} - -func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) -} - -func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE) -} - -func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { - if !as.isAuthEnabled() { - return nil - } - if authInfo == nil { - return ErrUserEmpty - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - u := getUser(tx, authInfo.Username) - if u == nil { - return ErrUserNotFound - } - - if !hasRootRole(u) { - return ErrPermissionDenied - } - - return nil -} - -func getUser(tx backend.BatchTx, username string) *authpb.User { - _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0) - if len(vs) == 0 { - return nil - } - - user := &authpb.User{} - err := user.Unmarshal(vs[0]) - if err != nil { - plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) - } - return user -} - -func getAllUsers(tx backend.BatchTx) []*authpb.User { - _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - var users []*authpb.User - - for _, v := range vs { - user := &authpb.User{} - err := user.Unmarshal(v) - if err != nil { - plog.Panicf("failed to unmarshal user struct: %s", err) - } - - users = append(users, user) - } - - return users -} - -func putUser(tx backend.BatchTx, user 
*authpb.User) { - b, err := user.Marshal() - if err != nil { - plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) - } - tx.UnsafePut(authUsersBucketName, user.Name, b) -} - -func delUser(tx backend.BatchTx, username string) { - tx.UnsafeDelete(authUsersBucketName, []byte(username)) -} - -func getRole(tx backend.BatchTx, rolename string) *authpb.Role { - _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0) - if len(vs) == 0 { - return nil - } - - role := &authpb.Role{} - err := role.Unmarshal(vs[0]) - if err != nil { - plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err) - } - return role -} - -func getAllRoles(tx backend.BatchTx) []*authpb.Role { - _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - var roles []*authpb.Role - - for _, v := range vs { - role := &authpb.Role{} - err := role.Unmarshal(v) - if err != nil { - plog.Panicf("failed to unmarshal role struct: %s", err) - } - - roles = append(roles, role) - } - - return roles -} - -func putRole(tx backend.BatchTx, role *authpb.Role) { - b, err := role.Marshal() - if err != nil { - plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) - } - - tx.UnsafePut(authRolesBucketName, []byte(role.Name), b) -} - -func delRole(tx backend.BatchTx, rolename string) { - tx.UnsafeDelete(authRolesBucketName, []byte(rolename)) -} - -func (as *authStore) isAuthEnabled() bool { - as.enabledMu.RLock() - defer as.enabledMu.RUnlock() - return as.enabled -} - -func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { - tx := be.BatchTx() - tx.Lock() - - tx.UnsafeCreateBucket(authBucketName) - tx.UnsafeCreateBucket(authUsersBucketName) - tx.UnsafeCreateBucket(authRolesBucketName) - - enabled := false - _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - - as := &authStore{ - be: be, - revision: getRevision(tx), - enabled: enabled, - rangePermCache: make(map[string]*unifiedRangePermissions), - tokenProvider: tp, - } - - if enabled { - as.tokenProvider.enable() - } - - if as.Revision() == 0 { - as.commitRevision(tx) - } - - tx.Unlock() - be.ForceCommit() - - return as -} - -func hasRootRole(u *authpb.User) bool { - for _, r := range u.Roles { - if r == rootRole { - return true - } - } - return false -} - -func (as *authStore) commitRevision(tx backend.BatchTx) { - atomic.AddUint64(&as.revision, 1) - revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.Revision()) - tx.UnsafePut(authBucketName, revisionKey, revBytes) -} - -func getRevision(tx backend.BatchTx) uint64 { - _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0) - if len(vs) != 1 { - // this can happen in the initialization phase - return 0 - } - - return binary.BigEndian.Uint64(vs[0]) -} - -func (as *authStore) setRevision(rev uint64) { - atomic.StoreUint64(&as.revision, rev) -} - -func (as *authStore) Revision() uint64 { - return atomic.LoadUint64(&as.revision) -} - -func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { - peer, ok := peer.FromContext(ctx) - if !ok || peer == nil || peer.AuthInfo == nil { - return nil - } - - tlsInfo := peer.AuthInfo.(credentials.TLSInfo) - for _, chains := range tlsInfo.State.VerifiedChains { - for _, chain := range chains { - cn := chain.Subject.CommonName - plog.Debugf("found common name %s", cn) - - return &AuthInfo{ - Username: cn, - Revision: 
as.Revision(), - } - } - } - - return nil -} - -func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromContext(ctx) - if !ok { - return nil, nil - } - - ts, tok := md["token"] - if !tok { - return nil, nil - } - - token := ts[0] - authInfo, uok := as.authInfoFromToken(ctx, token) - if !uok { - plog.Warningf("invalid auth token: %s", token) - return nil, ErrInvalidAuthToken - } - return authInfo, nil -} - -func (as *authStore) GenTokenPrefix() (string, error) { - return as.tokenProvider.genTokenPrefix() -} - -func decomposeOpts(optstr string) (string, map[string]string, error) { - opts := strings.Split(optstr, ",") - tokenType := opts[0] - - typeSpecificOpts := make(map[string]string) - for i := 1; i < len(opts); i++ { - pair := strings.Split(opts[i], "=") - - if len(pair) != 2 { - plog.Errorf("invalid token specific option: %s", optstr) - return "", nil, ErrInvalidAuthOpts - } - - if _, ok := typeSpecificOpts[pair[0]]; ok { - plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) - return "", nil, ErrInvalidAuthOpts - } - - typeSpecificOpts[pair[0]] = pair[1] - } - - return tokenType, typeSpecificOpts, nil - -} - -func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { - tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - switch tokenType { - case "simple": - plog.Warningf("simple token is not cryptographically signed") - return newTokenProviderSimple(indexWaiter), nil - case "jwt": - return newTokenProviderJWT(typeSpecificOpts) - default: - plog.Errorf("unknown token type: %s", tokenType) - return nil, ErrInvalidAuthOpts - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go deleted file mode 100644 index 5e2de58e9..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "sync" - - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" - "github.com/coreos/pkg/capnslog" -) - -type Capability string - -const ( - AuthCapability Capability = "auth" - V3rpcCapability Capability = "v3rpc" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") - - // capabilityMaps is a static map of version to capability map. 
- capabilityMaps = map[string]map[Capability]bool{ - "3.0.0": {AuthCapability: true, V3rpcCapability: true}, - "3.1.0": {AuthCapability: true, V3rpcCapability: true}, - "3.2.0": {AuthCapability: true, V3rpcCapability: true}, - } - - enableMapMu sync.RWMutex - // enabledMap points to a map in capabilityMaps - enabledMap map[Capability]bool - - curVersion *semver.Version -) - -func init() { - enabledMap = map[Capability]bool{ - AuthCapability: true, - V3rpcCapability: true, - } -} - -// UpdateCapability updates the enabledMap when the cluster version increases. -func UpdateCapability(v *semver.Version) { - if v == nil { - // if recovered but version was never set by cluster - return - } - enableMapMu.Lock() - if curVersion != nil && !curVersion.LessThan(*v) { - enableMapMu.Unlock() - return - } - curVersion = v - enabledMap = capabilityMaps[curVersion.String()] - enableMapMu.Unlock() - plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) -} - -func IsCapabilityEnabled(c Capability) bool { - enableMapMu.RLock() - defer enableMapMu.RUnlock() - if enabledMap == nil { - return false - } - return enabledMap[c] -} - -func EnableCapability(c Capability) { - enableMapMu.Lock() - defer enableMapMu.Unlock() - enabledMap[c] = true -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go deleted file mode 100644 index 87face4a1..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - - "github.com/coreos/go-semver/semver" -) - -// Cluster is an interface representing a collection of members in one etcd cluster. -type Cluster interface { - // ID returns the cluster ID - ID() types.ID - // ClientURLs returns an aggregate set of all URLs on which this - // cluster is listening for client requests - ClientURLs() []string - // Members returns a slice of members sorted by their ID - Members() []*membership.Member - // Member retrieves a particular member based on ID, or nil if the - // member does not exist in the cluster - Member(id types.ID) *membership.Member - // IsIDRemoved checks whether the given ID has been removed from this - // cluster at some point in the past - IsIDRemoved(id types.ID) bool - // Version is the cluster-wide minimum major.minor version. - Version() *semver.Version -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go deleted file mode 100644 index f44881be6..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package api manages the capabilities and features that are exposed to clients by the etcd cluster. -package api diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go deleted file mode 100644 index e66c5261d..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" -) - -type AuthServer struct { - authenticator etcdserver.Authenticator -} - -func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer { - return &AuthServer{authenticator: s} -} - -func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := as.authenticator.AuthEnable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := as.authenticator.AuthDisable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - resp, err := as.authenticator.Authenticate(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := as.authenticator.RoleAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := as.authenticator.RoleDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := as.authenticator.RoleGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := as.authenticator.RoleList(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleRevokePermission(ctx context.Context, r 
*pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := as.authenticator.RoleRevokePermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := as.authenticator.RoleGrantPermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := as.authenticator.UserAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := as.authenticator.UserDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := as.authenticator.UserGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := as.authenticator.UserList(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := as.authenticator.UserGrantRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := as.authenticator.UserRevokeRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := as.authenticator.UserChangePassword(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go deleted file mode 100644 index 17a2c87ae..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
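Every handler in the deleted v3rpc/auth.go above shares one shape: delegate to the backing authenticator, and on failure translate the internal error into a wire-level one before returning. A minimal sketch of that delegate-and-translate pattern, with invented names (authenticator, toGRPC), not etcd's actual API:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Illustrative stand-in for an internal sentinel error.
var errAuthFailed = errors.New("auth failed")

type authenticator func(ctx context.Context, user, pass string) (string, error)

// toGRPC plays the role of togRPCError: translate an internal error
// into a wire-level one at the RPC boundary.
func toGRPC(err error) error {
	if err == errAuthFailed {
		return errors.New("rpc error: authentication failed")
	}
	return fmt.Errorf("rpc error: unknown: %v", err)
}

// authenticate shows the shape shared by every AuthServer method above:
// call the backing implementation, convert failures, pass successes through.
func authenticate(ctx context.Context, a authenticator, user, pass string) (string, error) {
	token, err := a(ctx, user, pass)
	if err != nil {
		return "", toGRPC(err)
	}
	return token, nil
}

func main() {
	deny := func(context.Context, string, string) (string, error) { return "", errAuthFailed }
	_, err := authenticate(context.Background(), deny, "root", "bad")
	fmt.Println(err) // rpc error: authentication failed
}

Keeping the translation at the boundary means the internal packages can keep returning their own sentinel errors, and only this layer knows about gRPC codes.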
- -package v3rpc - -import "github.com/gogo/protobuf/proto" - -type codec struct{} - -func (c *codec) Marshal(v interface{}) ([]byte, error) { - b, err := proto.Marshal(v.(proto.Message)) - sentBytes.Add(float64(len(b))) - return b, err -} - -func (c *codec) Unmarshal(data []byte, v interface{}) error { - receivedBytes.Add(float64(len(data))) - return proto.Unmarshal(data, v.(proto.Message)) -} - -func (c *codec) String() string { - return "proto" -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go deleted file mode 100644 index 68a7c120e..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "crypto/tls" - "math" - - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -const maxStreams = math.MaxUint32 - -func init() { - grpclog.SetLogger(plog) -} - -func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { - var opts []grpc.ServerOption - opts = append(opts, grpc.CustomCodec(&codec{})) - if tls != nil { - opts = append(opts, grpc.Creds(credentials.NewTLS(tls))) - } - opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) - opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) - opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) - grpcServer := grpc.NewServer(opts...) - - pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) - pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) - pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) - pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) - pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) - pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) - - return grpcServer -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go deleted file mode 100644 index d6d7f35d5..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
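The deleted codec.go above instruments (un)marshaling by recording payload sizes as a side effect before handing the bytes on. A dependency-free sketch of the same idea, using plain atomics instead of the Prometheus counters and an identity "encoding" purely for illustration:

package main

import (
	"fmt"
	"sync/atomic"
)

// Stand-ins for the two counters the deleted codec.go feeds.
var sentBytes, receivedBytes int64

// countingCodec keeps the shape of the idea: do the encoding work,
// and record payload sizes as a side effect.
type countingCodec struct{}

func (countingCodec) Marshal(v []byte) ([]byte, error) {
	atomic.AddInt64(&sentBytes, int64(len(v)))
	return v, nil
}

func (countingCodec) Unmarshal(data []byte) error {
	atomic.AddInt64(&receivedBytes, int64(len(data)))
	return nil
}

func main() {
	var c countingCodec
	c.Marshal([]byte("hello"))
	c.Unmarshal([]byte("world!"))
	fmt.Println(atomic.LoadInt64(&sentBytes), atomic.LoadInt64(&receivedBytes)) // 5 6
}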
- -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type header struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - rev func() int64 -} - -func newHeader(s *etcdserver.EtcdServer) header { - return header{ - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), - raftTimer: s, - rev: func() int64 { return s.KV().Rev() }, - } -} - -// fill populates pb.ResponseHeader using etcdserver information -func (h *header) fill(rh *pb.ResponseHeader) { - rh.ClusterId = uint64(h.clusterID) - rh.MemberId = uint64(h.memberID) - rh.RaftTerm = h.raftTimer.Term() - if rh.Revision == 0 { - rh.Revision = h.rev() - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go deleted file mode 100644 index 29aef2914..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "sync" - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - - prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -const ( - maxNoLeaderCnt = 3 -) - -type streamsMap struct { - mu sync.Mutex - streams map[grpc.ServerStream]struct{} -} - -func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return nil, rpctypes.ErrGRPCNotCapable - } - - md, ok := metadata.FromContext(ctx) - if ok { - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return nil, rpctypes.ErrGRPCNoLeader - } - } - } - - return prometheus.UnaryServerInterceptor(ctx, req, info, handler) - } -} - -func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor { - smap := monitorLeader(s) - - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return rpctypes.ErrGRPCNotCapable - } - - md, ok := metadata.FromContext(ss.Context()) - if ok { - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return rpctypes.ErrGRPCNoLeader - } - - cctx, cancel := context.WithCancel(ss.Context()) - ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss} - - smap.mu.Lock() - smap.streams[ss] = struct{}{} - 
smap.mu.Unlock() - - defer func() { - smap.mu.Lock() - delete(smap.streams, ss) - smap.mu.Unlock() - cancel() - }() - - } - } - - return prometheus.StreamServerInterceptor(srv, ss, info, handler) - } -} - -type serverStreamWithCtx struct { - grpc.ServerStream - ctx context.Context - cancel *context.CancelFunc -} - -func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx } - -func monitorLeader(s *etcdserver.EtcdServer) *streamsMap { - smap := &streamsMap{ - streams: make(map[grpc.ServerStream]struct{}), - } - - go func() { - election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond - noLeaderCnt := 0 - - for { - select { - case <-s.StopNotify(): - return - case <-time.After(election): - if s.Leader() == types.ID(raft.None) { - noLeaderCnt++ - } else { - noLeaderCnt = 0 - } - - // We are more conservative on canceling existing streams. Reconnecting streams - // cost much more than just rejecting new requests. So we wait until the member - // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams. - if noLeaderCnt >= maxNoLeaderCnt { - smap.mu.Lock() - for ss := range smap.streams { - if ssWithCtx, ok := ss.(serverStreamWithCtx); ok { - (*ssWithCtx.cancel)() - <-ss.Context().Done() - } - } - smap.streams = make(map[grpc.ServerStream]struct{}) - smap.mu.Unlock() - } - } - } - }() - - return smap -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go deleted file mode 100644 index d0220e03a..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3rpc implements etcd v3 RPC system based on gRPC. -package v3rpc - -import ( - "sort" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc") - - // Max operations per txn list. For example, Txn.Success can have at most 128 operations, - // and Txn.Failure can have at most 128 operations. 
- MaxOpsPerTxn = 128 -) - -type kvServer struct { - hdr header - kv etcdserver.RaftKV -} - -func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return &kvServer{hdr: newHeader(s), kv: s} -} - -func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := checkRangeRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Range(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := checkPutRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Put(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := checkDeleteRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.DeleteRange(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := checkTxnRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Txn(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - resp, err := s.kv.Compact(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func checkRangeRequest(r *pb.RangeRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkPutRequest(r *pb.PutRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - if r.IgnoreValue && len(r.Value) != 0 { - return rpctypes.ErrGRPCValueProvided - } - if r.IgnoreLease && r.Lease != 0 { - return rpctypes.ErrGRPCLeaseProvided - } - return nil -} - -func checkDeleteRequest(r *pb.DeleteRangeRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkTxnRequest(r *pb.TxnRequest) error { - if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn { - return rpctypes.ErrGRPCTooManyOps - } - - for _, c := range r.Compare { - if len(c.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - } - - for _, u := range r.Success { - if err := checkRequestOp(u); err != nil { - return err - } - } - if err := checkRequestDupKeys(r.Success); err != nil { - return err - } - - for _, u := range r.Failure { - if err := checkRequestOp(u); err != nil { - return err - } - } - return checkRequestDupKeys(r.Failure) -} - -// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice -func checkRequestDupKeys(reqs []*pb.RequestOp) error { - // check put overlap - keys := make(map[string]struct{}) - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) - if !ok { - continue - } - preq := tv.RequestPut - if 
preq == nil { - continue - } - if _, ok := keys[string(preq.Key)]; ok { - return rpctypes.ErrGRPCDuplicateKey - } - keys[string(preq.Key)] = struct{}{} - } - - // no need to check deletes if no puts; delete overlaps are permitted - if len(keys) == 0 { - return nil - } - - // sort keys for range checking - sortedKeys := []string{} - for k := range keys { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - // check put overlap with deletes - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange) - if !ok { - continue - } - dreq := tv.RequestDeleteRange - if dreq == nil { - continue - } - if dreq.RangeEnd == nil { - if _, found := keys[string(dreq.Key)]; found { - return rpctypes.ErrGRPCDuplicateKey - } - } else { - lo := sort.SearchStrings(sortedKeys, string(dreq.Key)) - hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd)) - if lo != hi { - // element between lo and hi => overlap - return rpctypes.ErrGRPCDuplicateKey - } - } - } - - return nil -} - -func checkRequestOp(u *pb.RequestOp) error { - // TODO: ensure only one of the field is set. - switch uv := u.Request.(type) { - case *pb.RequestOp_RequestRange: - if uv.RequestRange != nil { - return checkRangeRequest(uv.RequestRange) - } - case *pb.RequestOp_RequestPut: - if uv.RequestPut != nil { - return checkPutRequest(uv.RequestPut) - } - case *pb.RequestOp_RequestDeleteRange: - if uv.RequestDeleteRange != nil { - return checkDeleteRequest(uv.RequestDeleteRange) - } - default: - // empty op / nil entry - return rpctypes.ErrGRPCKeyNotFound - } - return nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go deleted file mode 100644 index a25d0ce6a..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
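The range-overlap test in the deleted checkRequestDupKeys above reduces to a two-binary-search window over the sorted put keys: if sort.SearchStrings for the delete range's start and end land on different indexes, some put key falls inside the deleted range. A standalone restatement (keys and ranges invented for the example):

package main

import (
	"fmt"
	"sort"
)

// overlaps reports whether any of the (sorted) put keys falls inside the
// half-open delete range [start, end), the same test checkRequestDupKeys
// performs with two binary searches.
func overlaps(sortedPuts []string, start, end string) bool {
	lo := sort.SearchStrings(sortedPuts, start)
	hi := sort.SearchStrings(sortedPuts, end)
	return lo != hi // an element between lo and hi => overlap
}

func main() {
	puts := []string{"a", "c"} // must already be sorted
	fmt.Println(overlaps(puts, "b", "d")) // true: "c" lies in [b, d)
	fmt.Println(overlaps(puts, "d", "e")) // false: nothing in [d, e)
}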
- -package v3rpc - -import ( - "io" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "golang.org/x/net/context" -) - -type LeaseServer struct { - hdr header - le etcdserver.Lessor -} - -func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return &LeaseServer{le: s, hdr: newHeader(s)} -} - -func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - resp, err := ls.le.LeaseGrant(ctx, cr) - - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := ls.le.LeaseRevoke(ctx, rr) - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil && err != lease.ErrLeaseNotFound { - return nil, togRPCError(err) - } - if err == lease.ErrLeaseNotFound { - resp = &pb.LeaseTimeToLiveResponse{ - Header: &pb.ResponseHeader{}, - ID: rr.ID, - TTL: -1, - } - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) { - errc := make(chan error, 1) - go func() { - errc <- ls.leaseKeepAlive(stream) - }() - select { - case err = <-errc: - case <-stream.Context().Done(): - // the only server-side cancellation is noleader for now. - err = stream.Context().Err() - if err == context.Canceled { - err = rpctypes.ErrGRPCNoLeader - } - } - return err -} - -func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - for { - req, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - // Create header before we sent out the renew request. - // This can make sure that the revision is strictly smaller or equal to - // when the keepalive happened at the local server (when the local server is the leader) - // or remote leader. - // Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded - // at rev 4. - resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}} - ls.hdr.fill(resp.Header) - - ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID)) - if err == lease.ErrLeaseNotFound { - err = nil - ttl = 0 - } - - if err != nil { - return togRPCError(err) - } - - resp.TTL = ttl - err = stream.Send(resp) - if err != nil { - return err - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go deleted file mode 100644 index 3657d0360..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
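One detail of the deleted leaseKeepAlive loop above is worth isolating: a renewal against a missing lease is not a stream error; it is reported back as TTL 0 so the client learns the lease is gone while the keepalive stream stays up. A simplified sketch of that per-request step, with an invented renew function standing in for the real lessor:

package main

import (
	"errors"
	"fmt"
)

// errLeaseNotFound stands in for lease.ErrLeaseNotFound.
var errLeaseNotFound = errors.New("lease not found")

// renew is a hypothetical lessor lookup; a real one consults the lease store.
func renew(id int64) (int64, error) {
	if id == 42 {
		return 30, nil // 30 seconds remaining
	}
	return 0, errLeaseNotFound
}

// keepAliveOnce mirrors one iteration of the deleted loop: a missing lease
// becomes TTL 0 rather than an error, so the stream survives.
func keepAliveOnce(id int64) (ttl int64, err error) {
	ttl, err = renew(id)
	if err == errLeaseNotFound {
		return 0, nil
	}
	return ttl, err
}

func main() {
	fmt.Println(keepAliveOnce(42)) // 30 <nil>
	fmt.Println(keepAliveOnce(7))  // 0 <nil>
}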
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "crypto/sha256" - "io" - - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" - "golang.org/x/net/context" -) - -type KVGetter interface { - KV() mvcc.ConsistentWatchableKV -} - -type BackendGetter interface { - Backend() backend.Backend -} - -type Alarmer interface { - Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) -} - -type RaftStatusGetter interface { - Index() uint64 - Term() uint64 - Leader() types.ID -} - -type AuthGetter interface { - AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) - AuthStore() auth.AuthStore -} - -type maintenanceServer struct { - rg RaftStatusGetter - kg KVGetter - bg BackendGetter - a Alarmer - hdr header -} - -func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { - srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)} - return &authMaintenanceServer{srv, s} -} - -func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - plog.Noticef("starting to defragment the storage backend...") - err := ms.bg.Backend().Defrag() - if err != nil { - plog.Errorf("failed to defragment the storage backend (%v)", err) - return nil, err - } - plog.Noticef("finished defragmenting the storage backend") - return &pb.DefragmentResponse{}, nil -} - -func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - snap := ms.bg.Backend().Snapshot() - pr, pw := io.Pipe() - - defer pr.Close() - - go func() { - snap.WriteTo(pw) - if err := snap.Close(); err != nil { - plog.Errorf("error closing snapshot (%v)", err) - } - pw.Close() - }() - - // send file data - h := sha256.New() - br := int64(0) - buf := make([]byte, 32*1024) - sz := snap.Size() - for br < sz { - n, err := io.ReadFull(pr, buf) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return togRPCError(err) - } - br += int64(n) - resp := &pb.SnapshotResponse{ - RemainingBytes: uint64(sz - br), - Blob: buf[:n], - } - if err = srv.Send(resp); err != nil { - return togRPCError(err) - } - h.Write(buf[:n]) - } - - // send sha - sha := h.Sum(nil) - hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} - if err := srv.Send(hresp); err != nil { - return togRPCError(err) - } - - return nil -} - -func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - h, rev, err := ms.kg.KV().Hash() - if err != nil { - return nil, togRPCError(err) - } - resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h} - ms.hdr.fill(resp.Header) - return resp, nil -} - -func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - return ms.a.Alarm(ctx, ar) -} - -func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - resp := 
&pb.StatusResponse{ - Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, - Version: version.Version, - DbSize: ms.bg.Backend().Size(), - Leader: uint64(ms.rg.Leader()), - RaftIndex: ms.rg.Index(), - RaftTerm: ms.rg.Term(), - } - ms.hdr.fill(resp.Header) - return resp, nil -} - -type authMaintenanceServer struct { - *maintenanceServer - ag AuthGetter -} - -func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { - authInfo, err := ams.ag.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return ams.ag.AuthStore().IsAdminPermitted(authInfo) -} - -func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Defragment(ctx, sr) -} - -func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - if err := ams.isAuthenticated(srv.Context()); err != nil { - return err - } - - return ams.maintenanceServer.Snapshot(sr, srv) -} - -func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Hash(ctx, r) -} - -func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - return ams.maintenanceServer.Status(ctx, ar) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go deleted file mode 100644 index 91a59389b..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
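The authMaintenanceServer above is a small decorator: it embeds the plain maintenance server and gates each privileged RPC behind an authentication check before delegating. A minimal sketch of that embedding pattern, with invented names (the real code calls IsAdminPermitted on the auth store):

package main

import (
	"context"
	"errors"
	"fmt"
)

type maintenance struct{}

func (maintenance) Defragment(ctx context.Context) error {
	fmt.Println("defragmenting...")
	return nil
}

// authMaintenance embeds the base server and checks the caller's identity
// before forwarding; unlisted methods fall through to the embedded type.
type authMaintenance struct {
	maintenance
	isAdmin func(ctx context.Context) error
}

func (am *authMaintenance) Defragment(ctx context.Context) error {
	if err := am.isAdmin(ctx); err != nil {
		return err
	}
	return am.maintenance.Defragment(ctx)
}

func main() {
	am := &authMaintenance{isAdmin: func(context.Context) error {
		return errors.New("permission denied")
	}}
	fmt.Println(am.Defragment(context.Background())) // permission denied
}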
- -package v3rpc - -import ( - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "golang.org/x/net/context" -) - -type ClusterServer struct { - cluster api.Cluster - server etcdserver.Server - raftTimer etcdserver.RaftTimer -} - -func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer { - return &ClusterServer{ - cluster: s.Cluster(), - server: s, - raftTimer: s, - } -} - -func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - urls, err := types.NewURLs(r.PeerURLs) - if err != nil { - return nil, rpctypes.ErrGRPCMemberBadURLs - } - - now := time.Now() - m := membership.NewMember("", urls, "", &now) - membs, merr := cs.server.AddMember(ctx, *m) - if merr != nil { - return nil, togRPCError(merr) - } - - return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, - Members: membersToProtoMembers(membs), - }, nil -} - -func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - membs, err := cs.server.RemoveMember(ctx, r.ID) - if err != nil { - return nil, togRPCError(err) - } - return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil -} - -func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - m := membership.Member{ - ID: types.ID(r.ID), - RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, - } - membs, err := cs.server.UpdateMember(ctx, m) - if err != nil { - return nil, togRPCError(err) - } - return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil -} - -func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - membs := membersToProtoMembers(cs.cluster.Members()) - return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil -} - -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} -} - -func membersToProtoMembers(membs []*membership.Member) []*pb.Member { - protoMembs := make([]*pb.Member, len(membs)) - for i := range membs { - protoMembs[i] = &pb.Member{ - Name: membs[i].Name, - ID: uint64(membs[i].ID), - PeerURLs: membs[i].PeerURLs, - ClientURLs: membs[i].ClientURLs, - } - } - return protoMembs -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go deleted file mode 100644 index 6cb41a61e..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
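The membersToProtoMembers helper in the deleted member.go above is a plain internal-to-wire conversion: preallocate the output slice, then copy field by field. A trimmed, self-contained restatement with invented struct names:

package main

import "fmt"

// Internal and wire-level member shapes, reduced for the example.
type member struct {
	id   uint64
	name string
	urls []string
}

type protoMember struct {
	ID       uint64
	Name     string
	PeerURLs []string
}

// toProtoMembers mirrors membersToProtoMembers: size the slice up front
// and translate each member into its wire representation.
func toProtoMembers(membs []*member) []*protoMember {
	out := make([]*protoMember, len(membs))
	for i := range membs {
		out[i] = &protoMember{ID: membs[i].id, Name: membs[i].name, PeerURLs: membs[i].urls}
	}
	return out
}

func main() {
	ms := []*member{{id: 1, name: "infra0", urls: []string{"http://10.0.0.1:2380"}}}
	fmt.Println(toProtoMembers(ms)[0].Name) // infra0
}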
-// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import "github.com/prometheus/client_golang/prometheus" - -var ( - sentBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_sent_bytes_total", - Help: "The total number of bytes sent to grpc clients.", - }) - - receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_received_bytes_total", - Help: "The total number of bytes received from grpc clients.", - }) -) - -func init() { - prometheus.MustRegister(sentBytes) - prometheus.MustRegister(receivedBytes) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go deleted file mode 100644 index 836f2fd3f..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" - "golang.org/x/net/context" -) - -type quotaKVServer struct { - pb.KVServer - qa quotaAlarmer -} - -type quotaAlarmer struct { - q etcdserver.Quota - a Alarmer - id types.ID -} - -// check whether request satisfies the quota. If there is not enough space, -// ignore request and raise the free space alarm. 
-func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { - if qa.q.Available(r) { - return nil - } - req := &pb.AlarmRequest{ - MemberID: uint64(qa.id), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - qa.a.Alarm(ctx, req) - return rpctypes.ErrGRPCNoSpace -} - -func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return "aKVServer{ - NewKVServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, - } -} - -func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Put(ctx, r) -} - -func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Txn(ctx, r) -} - -type quotaLeaseServer struct { - pb.LeaseServer - qa quotaAlarmer -} - -func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - if err := s.qa.check(ctx, cr); err != nil { - return nil, err - } - return s.LeaseServer.LeaseGrant(ctx, cr) -} - -func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return "aLeaseServer{ - NewLeaseServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go deleted file mode 100644 index 8d38d9bd1..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
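The quotaAlarmer.check above admits a request if it fits the quota; otherwise it fires the NOSPACE alarm and rejects with a quota error, and the quotaKVServer/quotaLeaseServer wrappers run this check before every Put, Txn, and LeaseGrant. A minimal sketch of that check, with illustrative quota and alarmer stand-ins (etcd's real Quota inspects the request type and backend size):

package main

import (
	"errors"
	"fmt"
)

var errNoSpace = errors.New("no space")

// quota and alarmer are illustrative stand-ins for etcdserver.Quota / Alarmer.
type quota interface{ Available(reqSize int) bool }
type alarmer interface{ Alarm(kind string) }

type byteQuota struct{ free int }

func (q byteQuota) Available(reqSize int) bool { return reqSize <= q.free }

type logAlarmer struct{}

func (logAlarmer) Alarm(kind string) { fmt.Println("alarm raised:", kind) }

// check mirrors quotaAlarmer.check: admit the request if it fits, otherwise
// raise the NOSPACE alarm and reject it with a quota error.
func check(q quota, a alarmer, reqSize int) error {
	if q.Available(reqSize) {
		return nil
	}
	a.Alarm("NOSPACE")
	return errNoSpace
}

func main() {
	fmt.Println(check(byteQuota{free: 8}, logAlarmer{}, 4))  // <nil>
	fmt.Println(check(byteQuota{free: 8}, logAlarmer{}, 16)) // alarm raised: NOSPACE, then: no space
}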
- -package v3rpc - -import ( - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -func togRPCError(err error) error { - switch err { - case membership.ErrIDRemoved: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDNotFound: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDExists: - return rpctypes.ErrGRPCMemberExist - case membership.ErrPeerURLexists: - return rpctypes.ErrGRPCPeerURLExist - case etcdserver.ErrNotEnoughStartedMembers: - return rpctypes.ErrMemberNotEnoughStarted - - case mvcc.ErrCompacted: - return rpctypes.ErrGRPCCompacted - case mvcc.ErrFutureRev: - return rpctypes.ErrGRPCFutureRev - case etcdserver.ErrRequestTooLarge: - return rpctypes.ErrGRPCRequestTooLarge - case etcdserver.ErrNoSpace: - return rpctypes.ErrGRPCNoSpace - case etcdserver.ErrTooManyRequests: - return rpctypes.ErrTooManyRequests - - case etcdserver.ErrNoLeader: - return rpctypes.ErrGRPCNoLeader - case etcdserver.ErrStopped: - return rpctypes.ErrGRPCStopped - case etcdserver.ErrTimeout: - return rpctypes.ErrGRPCTimeout - case etcdserver.ErrTimeoutDueToLeaderFail: - return rpctypes.ErrGRPCTimeoutDueToLeaderFail - case etcdserver.ErrTimeoutDueToConnectionLost: - return rpctypes.ErrGRPCTimeoutDueToConnectionLost - case etcdserver.ErrUnhealthy: - return rpctypes.ErrGRPCUnhealthy - case etcdserver.ErrKeyNotFound: - return rpctypes.ErrGRPCKeyNotFound - - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound - case lease.ErrLeaseExists: - return rpctypes.ErrGRPCLeaseExist - - case auth.ErrRootUserNotExist: - return rpctypes.ErrGRPCRootUserNotExist - case auth.ErrRootRoleNotExist: - return rpctypes.ErrGRPCRootRoleNotExist - case auth.ErrUserAlreadyExist: - return rpctypes.ErrGRPCUserAlreadyExist - case auth.ErrUserEmpty: - return rpctypes.ErrGRPCUserEmpty - case auth.ErrUserNotFound: - return rpctypes.ErrGRPCUserNotFound - case auth.ErrRoleAlreadyExist: - return rpctypes.ErrGRPCRoleAlreadyExist - case auth.ErrRoleNotFound: - return rpctypes.ErrGRPCRoleNotFound - case auth.ErrAuthFailed: - return rpctypes.ErrGRPCAuthFailed - case auth.ErrPermissionDenied: - return rpctypes.ErrGRPCPermissionDenied - case auth.ErrRoleNotGranted: - return rpctypes.ErrGRPCRoleNotGranted - case auth.ErrPermissionNotGranted: - return rpctypes.ErrGRPCPermissionNotGranted - case auth.ErrAuthNotEnabled: - return rpctypes.ErrGRPCAuthNotEnabled - case auth.ErrInvalidAuthToken: - return rpctypes.ErrGRPCInvalidAuthToken - case auth.ErrInvalidAuthMgmt: - return rpctypes.ErrGRPCInvalidAuthMgmt - default: - return grpc.Errorf(codes.Unknown, err.Error()) - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go deleted file mode 100644 index 84c0a5eac..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
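The deleted togRPCError above is a pure translation table from internal sentinel errors to wire-level ones, with an Unknown-style fallback. Because the sentinels are comparable package-level values, the same table can be expressed as a map; a trimmed, illustrative variant (the error strings here are invented, not etcd's exact messages):

package main

import (
	"errors"
	"fmt"
)

// A couple of internal sentinel errors, standing in for the many cases
// of the original switch.
var (
	errCompacted     = errors.New("mvcc: required revision has been compacted")
	errLeaseNotFound = errors.New("lease not found")
)

var grpcErrors = map[error]error{
	errCompacted:     errors.New("rpc error: code = OutOfRange desc = compacted"),
	errLeaseNotFound: errors.New("rpc error: code = NotFound desc = lease not found"),
}

// toGRPC falls back to an Unknown-style error, as the switch's default does.
func toGRPC(err error) error {
	if g, ok := grpcErrors[err]; ok {
		return g
	}
	return fmt.Errorf("rpc error: code = Unknown desc = %v", err)
}

func main() {
	fmt.Println(toGRPC(errCompacted))
	fmt.Println(toGRPC(errors.New("disk on fire"))) // unmapped => Unknown
}

The switch in the original buys the same lookup without allocating a map at init; either form works because each case compares against a single sentinel value.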
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "io" - "sync" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type watchServer struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - watchable mvcc.WatchableKV - - ag AuthGetter -} - -func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { - return &watchServer{ - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), - raftTimer: s, - watchable: s.Watchable(), - ag: s, - } -} - -var ( - // External test can read this with GetProgressReportInterval() - // and change this to a small value to finish fast with - // SetProgressReportInterval(). - progressReportInterval = 10 * time.Minute - progressReportIntervalMu sync.RWMutex -) - -func GetProgressReportInterval() time.Duration { - progressReportIntervalMu.RLock() - defer progressReportIntervalMu.RUnlock() - return progressReportInterval -} - -func SetProgressReportInterval(newTimeout time.Duration) { - progressReportIntervalMu.Lock() - defer progressReportIntervalMu.Unlock() - progressReportInterval = newTimeout -} - -const ( - // We send ctrl response inside the read loop. We do not want - // send to block read, but we still want ctrl response we sent to - // be serialized. Thus we use a buffered chan to solve the problem. - // A small buffer should be OK for most cases, since we expect the - // ctrl requests are infrequent. - ctrlStreamBufLen = 16 -) - -// serverWatchStream is an etcd server side stream. It receives requests -// from client side gRPC stream. It receives watch events from mvcc.WatchStream, -// and creates responses that forwarded to gRPC stream. -// It also forwards control message like watch created and canceled. -type serverWatchStream struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - - watchable mvcc.WatchableKV - - gRPCStream pb.Watch_WatchServer - watchStream mvcc.WatchStream - ctrlStream chan *pb.WatchResponse - - // mu protects progress, prevKV - mu sync.Mutex - // progress tracks the watchID that stream might need to send - // progress to. - // TODO: combine progress and prevKV into a single struct? - progress map[mvcc.WatchID]bool - prevKV map[mvcc.WatchID]bool - - // closec indicates the stream is closed. - closec chan struct{} - - // wg waits for the send loop to complete - wg sync.WaitGroup - - ag AuthGetter -} - -func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { - sws := serverWatchStream{ - clusterID: ws.clusterID, - memberID: ws.memberID, - raftTimer: ws.raftTimer, - - watchable: ws.watchable, - - gRPCStream: stream, - watchStream: ws.watchable.NewWatchStream(), - // chan for sending control response like watcher created and canceled. 
- ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), - progress: make(map[mvcc.WatchID]bool), - prevKV: make(map[mvcc.WatchID]bool), - closec: make(chan struct{}), - - ag: ws.ag, - } - - sws.wg.Add(1) - go func() { - sws.sendLoop() - sws.wg.Done() - }() - - errc := make(chan error, 1) - // Ideally recvLoop would also use sws.wg to signal its completion - // but when stream.Context().Done() is closed, the stream's recv - // may continue to block since it uses a different context, leading to - // deadlock when calling sws.close(). - go func() { - if rerr := sws.recvLoop(); rerr != nil { - errc <- rerr - } - }() - select { - case err = <-errc: - close(sws.ctrlStream) - case <-stream.Context().Done(): - err = stream.Context().Err() - // the only server-side cancellation is noleader for now. - if err == context.Canceled { - err = rpctypes.ErrGRPCNoLeader - } - } - sws.close() - return err -} - -func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { - authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) - if err != nil { - return false - } - if authInfo == nil { - // if auth is enabled, IsRangePermitted() can cause an error - authInfo = &auth.AuthInfo{} - } - - return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil -} - -func (sws *serverWatchStream) recvLoop() error { - for { - req, err := sws.gRPCStream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - switch uv := req.RequestUnion.(type) { - case *pb.WatchRequest_CreateRequest: - if uv.CreateRequest == nil { - break - } - - creq := uv.CreateRequest - if len(creq.Key) == 0 { - // \x00 is the smallest key - creq.Key = []byte{0} - } - if len(creq.RangeEnd) == 0 { - // force nil since watchstream.Watch distinguishes - // between nil and []byte{} for single key / >= - creq.RangeEnd = nil - } - if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { - // support >= key queries - creq.RangeEnd = []byte{} - } - - if !sws.isWatchPermitted(creq) { - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: -1, - Canceled: true, - Created: true, - CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), - } - - select { - case sws.ctrlStream <- wr: - case <-sws.closec: - } - return nil - } - - filters := FiltersFromRequest(creq) - - wsrev := sws.watchStream.Rev() - rev := creq.StartRevision - if rev == 0 { - rev = wsrev + 1 - } - id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...) - if id != -1 { - sws.mu.Lock() - if creq.ProgressNotify { - sws.progress[id] = true - } - if creq.PrevKv { - sws.prevKV[id] = true - } - sws.mu.Unlock() - } - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wsrev), - WatchId: int64(id), - Created: true, - Canceled: id == -1, - } - select { - case sws.ctrlStream <- wr: - case <-sws.closec: - return nil - } - case *pb.WatchRequest_CancelRequest: - if uv.CancelRequest != nil { - id := uv.CancelRequest.WatchId - err := sws.watchStream.Cancel(mvcc.WatchID(id)) - if err == nil { - sws.ctrlStream <- &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: id, - Canceled: true, - } - sws.mu.Lock() - delete(sws.progress, mvcc.WatchID(id)) - delete(sws.prevKV, mvcc.WatchID(id)) - sws.mu.Unlock() - } - } - default: - // we probably should not shutdown the entire stream when - // receive an valid command. - // so just do nothing instead. 
- continue - } - } -} - -func (sws *serverWatchStream) sendLoop() { - // watch ids that are currently active - ids := make(map[mvcc.WatchID]struct{}) - // watch responses pending on a watch id creation message - pending := make(map[mvcc.WatchID][]*pb.WatchResponse) - - interval := GetProgressReportInterval() - progressTicker := time.NewTicker(interval) - - defer func() { - progressTicker.Stop() - // drain the chan to clean up pending events - for ws := range sws.watchStream.Chan() { - mvcc.ReportEventReceived(len(ws.Events)) - } - for _, wrs := range pending { - for _, ws := range wrs { - mvcc.ReportEventReceived(len(ws.Events)) - } - } - }() - - for { - select { - case wresp, ok := <-sws.watchStream.Chan(): - if !ok { - return - } - - // TODO: evs is []mvccpb.Event type - // either return []*mvccpb.Event from the mvcc package - // or define protocol buffer with []mvccpb.Event. - evs := wresp.Events - events := make([]*mvccpb.Event, len(evs)) - sws.mu.Lock() - needPrevKV := sws.prevKV[wresp.WatchID] - sws.mu.Unlock() - for i := range evs { - events[i] = &evs[i] - - if needPrevKV { - opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} - r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt) - if err == nil && len(r.KVs) != 0 { - events[i].PrevKv = &(r.KVs[0]) - } - } - } - - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wresp.Revision), - WatchId: int64(wresp.WatchID), - Events: events, - CompactRevision: wresp.CompactRevision, - } - - if _, hasId := ids[wresp.WatchID]; !hasId { - // buffer if id not yet announced - wrs := append(pending[wresp.WatchID], wr) - pending[wresp.WatchID] = wrs - continue - } - - mvcc.ReportEventReceived(len(evs)) - if err := sws.gRPCStream.Send(wr); err != nil { - return - } - - sws.mu.Lock() - if len(evs) > 0 && sws.progress[wresp.WatchID] { - // elide next progress update if sent a key update - sws.progress[wresp.WatchID] = false - } - sws.mu.Unlock() - - case c, ok := <-sws.ctrlStream: - if !ok { - return - } - - if err := sws.gRPCStream.Send(c); err != nil { - return - } - - // track id creation - wid := mvcc.WatchID(c.WatchId) - if c.Canceled { - delete(ids, wid) - continue - } - if c.Created { - // flush buffered events - ids[wid] = struct{}{} - for _, v := range pending[wid] { - mvcc.ReportEventReceived(len(v.Events)) - if err := sws.gRPCStream.Send(v); err != nil { - return - } - } - delete(pending, wid) - } - case <-progressTicker.C: - sws.mu.Lock() - for id, ok := range sws.progress { - if ok { - sws.watchStream.RequestProgress(id) - } - sws.progress[id] = true - } - sws.mu.Unlock() - case <-sws.closec: - return - } - } -} - -func (sws *serverWatchStream) close() { - sws.watchStream.Close() - close(sws.closec) - sws.wg.Wait() -} - -func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(sws.clusterID), - MemberId: uint64(sws.memberID), - Revision: rev, - RaftTerm: sws.raftTimer.Term(), - } -} - -func filterNoDelete(e mvccpb.Event) bool { - return e.Type == mvccpb.DELETE -} - -func filterNoPut(e mvccpb.Event) bool { - return e.Type == mvccpb.PUT -} - -func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc { - filters := make([]mvcc.FilterFunc, 0, len(creq.Filters)) - for _, ft := range creq.Filters { - switch ft { - case pb.WatchCreateRequest_NOPUT: - filters = append(filters, filterNoPut) - case pb.WatchCreateRequest_NODELETE: - filters = append(filters, filterNoDelete) - default: - } - } - return filters -} diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go deleted file mode 100644 index 0be93c52b..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "bytes" - "sort" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/types" - "github.com/gogo/protobuf/proto" - "golang.org/x/net/context" -) - -const ( - warnApplyDuration = 100 * time.Millisecond -) - -type applyResult struct { - resp proto.Message - err error - // physc signals the physical effect of the request has completed in addition - // to being logically reflected by the node. Currently only used for - // Compaction requests. - physc <-chan struct{} -} - -// applierV3 is the interface for processing V3 raft messages -type applierV3 interface { - Apply(r *pb.InternalRaftRequest) *applyResult - - Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) - Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) - - LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) - - Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) - - AuthEnable() (*pb.AuthEnableResponse, error) - AuthDisable() (*pb.AuthDisableResponse, error) - - UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -type applierV3backend struct { - s *EtcdServer -} - -func 
(s *EtcdServer) newApplierV3() applierV3 { - return newAuthApplierV3( - s.AuthStore(), - newQuotaApplierV3(s, &applierV3backend{s}), - ) -} - -func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { - ar := &applyResult{} - - // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls - switch { - case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) - case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) - case r.DeleteRange != nil: - ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) - case r.Txn != nil: - ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) - case r.Compaction != nil: - ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction) - case r.LeaseGrant != nil: - ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) - case r.LeaseRevoke != nil: - ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) - case r.Alarm != nil: - ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) - case r.Authenticate != nil: - ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) - case r.AuthEnable != nil: - ar.resp, ar.err = a.s.applyV3.AuthEnable() - case r.AuthDisable != nil: - ar.resp, ar.err = a.s.applyV3.AuthDisable() - case r.AuthUserAdd != nil: - ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) - case r.AuthUserDelete != nil: - ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) - case r.AuthUserChangePassword != nil: - ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) - case r.AuthUserGrantRole != nil: - ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) - case r.AuthUserGet != nil: - ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) - case r.AuthUserRevokeRole != nil: - ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) - case r.AuthRoleAdd != nil: - ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) - case r.AuthRoleGrantPermission != nil: - ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) - case r.AuthRoleGet != nil: - ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) - case r.AuthRoleRevokePermission != nil: - ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) - case r.AuthRoleDelete != nil: - ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) - case r.AuthUserList != nil: - ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) - case r.AuthRoleList != nil: - ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) - default: - panic("not implemented") - } - return ar -} - -func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { - resp = &pb.PutResponse{} - resp.Header = &pb.ResponseHeader{} - - val, leaseID := p.Value, lease.LeaseID(p.Lease) - if txn == nil { - if leaseID != lease.NoLease { - if l := a.s.lessor.Lookup(leaseID); l == nil { - return nil, lease.ErrLeaseNotFound - } - } - txn = a.s.KV().Write() - defer txn.End() - } - - var rr *mvcc.RangeResult - if p.IgnoreValue || p.IgnoreLease || p.PrevKv { - rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - if p.IgnoreValue || p.IgnoreLease { - if rr == nil || len(rr.KVs) == 0 { - // ignore_{lease,value} flag expects previous key-value pair - return nil, ErrKeyNotFound - } - } - if p.IgnoreValue { - val = rr.KVs[0].Value - } - if p.IgnoreLease { - leaseID = lease.LeaseID(rr.KVs[0].Lease) - } - if p.PrevKv { - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] - } - } - - resp.Header.Revision = txn.Put(p.Key, 
val, leaseID) - return resp, nil -} - -func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp := &pb.DeleteRangeResponse{} - resp.Header = &pb.ResponseHeader{} - - if txn == nil { - txn = a.s.kv.Write() - defer txn.End() - } - - if isGteRange(dr.RangeEnd) { - dr.RangeEnd = []byte{} - } - - if dr.PrevKv { - rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - if rr != nil { - for i := range rr.KVs { - resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) - } - } - } - - resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) - return resp, nil -} - -func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - resp := &pb.RangeResponse{} - resp.Header = &pb.ResponseHeader{} - - if txn == nil { - txn = a.s.kv.Read() - defer txn.End() - } - - if isGteRange(r.RangeEnd) { - r.RangeEnd = []byte{} - } - - limit := r.Limit - if r.SortOrder != pb.RangeRequest_NONE || - r.MinModRevision != 0 || r.MaxModRevision != 0 || - r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { - // fetch everything; sort and truncate afterwards - limit = 0 - } - if limit > 0 { - // fetch one extra for 'more' flag - limit = limit + 1 - } - - ro := mvcc.RangeOptions{ - Limit: limit, - Rev: r.Revision, - Count: r.CountOnly, - } - - rr, err := txn.Range(r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } - - if r.MaxModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } - pruneKVs(rr, f) - } - if r.MinModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } - pruneKVs(rr, f) - } - if r.MaxCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } - pruneKVs(rr, f) - } - if r.MinCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } - pruneKVs(rr, f) - } - - sortOrder := r.SortOrder - if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // sort ASCEND by default only when target is not 'KEY' - sortOrder = pb.RangeRequest_ASCEND - } - if sortOrder != pb.RangeRequest_NONE { - var sorter sort.Interface - switch { - case r.SortTarget == pb.RangeRequest_KEY: - sorter = &kvSortByKey{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VERSION: - sorter = &kvSortByVersion{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_CREATE: - sorter = &kvSortByCreate{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_MOD: - sorter = &kvSortByMod{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VALUE: - sorter = &kvSortByValue{&kvSort{rr.KVs}} - } - switch { - case sortOrder == pb.RangeRequest_ASCEND: - sort.Sort(sorter) - case sortOrder == pb.RangeRequest_DESCEND: - sort.Sort(sort.Reverse(sorter)) - } - } - - if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { - rr.KVs = rr.KVs[:r.Limit] - resp.More = true - } - - resp.Header.Revision = rr.Rev - resp.Count = int64(rr.Count) - for i := range rr.KVs { - if r.KeysOnly { - rr.KVs[i].Value = nil - } - resp.Kvs = append(resp.Kvs, &rr.KVs[i]) - } - return resp, nil -} - -func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - isWrite := !isTxnReadonly(rt) - txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) - - reqs, ok := 
a.compareToOps(txn, rt) - if isWrite { - if err := a.checkRequestPut(txn, reqs); err != nil { - txn.End() - return nil, err - } - } - if err := checkRequestRange(txn, reqs); err != nil { - txn.End() - return nil, err - } - - resps := make([]*pb.ResponseOp, len(reqs)) - txnResp := &pb.TxnResponse{ - Responses: resps, - Succeeded: ok, - Header: &pb.ResponseHeader{}, - } - - // When executing mutable txn ops, etcd must hold the txn lock so - // readers do not see any intermediate results. Since writes are - // serialized on the raft loop, the revision in the read view will - // be the revision of the write txn. - if isWrite { - txn.End() - txn = a.s.KV().Write() - } - for i := range reqs { - resps[i] = a.applyUnion(txn, reqs[i]) - } - rev := txn.Rev() - if len(txn.Changes()) != 0 { - rev++ - } - txn.End() - - txnResp.Header.Revision = rev - return txnResp, nil -} - -func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { - for _, c := range rt.Compare { - if !applyCompare(rv, c) { - return rt.Failure, false - } - } - return rt.Success, true -} - -// applyCompare applies the compare request. -// If the comparison succeeds, it returns true. Otherwise, returns false. -func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { - rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return false - } - var ckv mvccpb.KeyValue - if len(rr.KVs) != 0 { - ckv = rr.KVs[0] - } else { - // Use the zero value of ckv normally. However... - if c.Target == pb.Compare_VALUE { - // Always fail if we're comparing a value on a key that doesn't exist. - // We can treat non-existence as the empty set explicitly, such that - // even a key with a value of length 0 bytes is still a real key - // that was written that way - return false - } - } - - // -1 is less, 0 is equal, 1 is greater - var result int - switch c.Target { - case pb.Compare_VALUE: - tv, _ := c.TargetUnion.(*pb.Compare_Value) - if tv != nil { - result = bytes.Compare(ckv.Value, tv.Value) - } - case pb.Compare_CREATE: - tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision) - if tv != nil { - result = compareInt64(ckv.CreateRevision, tv.CreateRevision) - } - - case pb.Compare_MOD: - tv, _ := c.TargetUnion.(*pb.Compare_ModRevision) - if tv != nil { - result = compareInt64(ckv.ModRevision, tv.ModRevision) - } - case pb.Compare_VERSION: - tv, _ := c.TargetUnion.(*pb.Compare_Version) - if tv != nil { - result = compareInt64(ckv.Version, tv.Version) - } - } - - switch c.Result { - case pb.Compare_EQUAL: - return result == 0 - case pb.Compare_NOT_EQUAL: - return result != 0 - case pb.Compare_GREATER: - return result > 0 - case pb.Compare_LESS: - return result < 0 - } - return true -} - -func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { - switch tv := union.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange != nil { - resp, err := a.Range(txn, tv.RequestRange) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}} - } - case *pb.RequestOp_RequestPut: - if tv.RequestPut != nil { - resp, err := a.Put(txn, tv.RequestPut) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}} - } - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange != nil { - resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) - if err != nil { - 
plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}} - } - default: - // empty union - return nil - } - return nil - -} - -func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { - resp := &pb.CompactionResponse{} - resp.Header = &pb.ResponseHeader{} - ch, err := a.s.KV().Compact(compaction.Revision) - if err != nil { - return nil, ch, err - } - // get the current revision. which key to get is not important. - rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{}) - resp.Header.Revision = rr.Rev - return resp, ch, err -} - -func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) - resp := &pb.LeaseGrantResponse{} - if err == nil { - resp.ID = int64(l.ID) - resp.TTL = l.TTL() - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) - return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err -} - -func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp := &pb.AlarmResponse{} - oldCount := len(a.s.alarmStore.Get(ar.Alarm)) - - switch ar.Action { - case pb.AlarmRequest_GET: - resp.Alarms = a.s.alarmStore.Get(ar.Alarm) - case pb.AlarmRequest_ACTIVATE: - m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 - if !activated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE: - plog.Warningf("alarm raised %+v", m) - a.s.applyV3 = newApplierV3Capped(a) - default: - plog.Errorf("unimplemented alarm activation (%+v)", m) - } - case pb.AlarmRequest_DEACTIVATE: - m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 - if !deactivated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE: - plog.Infof("alarm disarmed %+v", ar) - a.s.applyV3 = a.s.newApplierV3() - default: - plog.Errorf("unimplemented alarm deactivation (%+v)", m) - } - default: - return nil, nil - } - return resp, nil -} - -type applierV3Capped struct { - applierV3 - q backendQuota -} - -// newApplierV3Capped creates an applyV3 that will reject Puts and transactions -// with Puts so that the number of keys in the store is capped. 
-func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } - -func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { - return nil, ErrNoSpace -} - -func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) { - if a.q.Cost(r) > 0 { - return nil, ErrNoSpace - } - return a.applierV3.Txn(r) -} - -func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrNoSpace -} - -func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { - err := a.s.AuthStore().AuthEnable() - if err != nil { - return nil, err - } - return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { - a.s.AuthStore().AuthDisable() - return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) - resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := a.s.AuthStore().UserAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := a.s.AuthStore().UserDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := a.s.AuthStore().UserChangePassword(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := a.s.AuthStore().UserGrantRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := a.s.AuthStore().UserGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := a.s.AuthStore().UserRevokeRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := a.s.AuthStore().RoleAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := a.s.AuthStore().RoleGrantPermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := a.s.AuthStore().RoleGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err 
:= a.s.AuthStore().RoleRevokePermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := a.s.AuthStore().RoleDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := a.s.AuthStore().UserList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := a.s.AuthStore().RoleList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -type quotaApplierV3 struct { - applierV3 - q Quota -} - -func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return &quotaApplierV3{app, NewBackendQuota(s)} -} - -func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { - ok := a.q.Available(p) - resp, err := a.applierV3.Put(txn, p) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := a.q.Available(rt) - resp, err := a.applierV3.Txn(rt) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - ok := a.q.Available(lc) - resp, err := a.applierV3.LeaseGrant(lc) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -type kvSort struct{ kvs []mvccpb.KeyValue } - -func (s *kvSort) Swap(i, j int) { - t := s.kvs[i] - s.kvs[i] = s.kvs[j] - s.kvs[j] = t -} -func (s *kvSort) Len() int { return len(s.kvs) } - -type kvSortByKey struct{ *kvSort } - -func (s *kvSortByKey) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 -} - -type kvSortByVersion struct{ *kvSort } - -func (s *kvSortByVersion) Less(i, j int) bool { - return (s.kvs[i].Version - s.kvs[j].Version) < 0 -} - -type kvSortByCreate struct{ *kvSort } - -func (s *kvSortByCreate) Less(i, j int) bool { - return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 -} - -type kvSortByMod struct{ *kvSort } - -func (s *kvSortByMod) Less(i, j int) bool { - return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 -} - -type kvSortByValue struct{ *kvSort } - -func (s *kvSortByValue) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 -} - -func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) - if !ok { - continue - } - preq := tv.RequestPut - if preq == nil { - continue - } - if preq.IgnoreValue || preq.IgnoreLease { - // expects previous key-value, error if not exist - rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return err - } - if rr == nil || len(rr.KVs) == 0 { - return ErrKeyNotFound - } - } - if lease.LeaseID(preq.Lease) == lease.NoLease { - continue - } - if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { - return lease.ErrLeaseNotFound - } - } - return nil -} - -func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestRange) - if !ok { - continue - } - greq := tv.RequestRange - if greq == nil || greq.Revision == 0 { - continue - } - - 
if greq.Revision > rv.Rev() { - return mvcc.ErrFutureRev - } - if greq.Revision < rv.FirstRev() { - return mvcc.ErrCompacted - } - } - return nil -} - -func compareInt64(a, b int64) int { - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } -} - -// isGteRange determines if the range end is a >= range. This works around grpc -// sending empty byte strings as nil; >= is encoded in the range end as '\0'. -func isGteRange(rangeEnd []byte) bool { - return len(rangeEnd) == 1 && rangeEnd[0] == 0 -} - -func noSideEffect(r *pb.InternalRaftRequest) bool { - return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil -} - -func removeNeedlessRangeReqs(txn *pb.TxnRequest) { - f := func(ops []*pb.RequestOp) []*pb.RequestOp { - j := 0 - for i := 0; i < len(ops); i++ { - if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { - continue - } - ops[j] = ops[i] - j++ - } - - return ops[:j] - } - - txn.Success = f(txn.Success) - txn.Failure = f(txn.Failure) -} - -func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { - j := 0 - for i := range rr.KVs { - rr.KVs[j] = rr.KVs[i] - if !isPrunable(&rr.KVs[i]) { - j++ - } - } - rr.KVs = rr.KVs[:j] -} - -func newHeader(s *EtcdServer) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(s.Cluster().ID()), - MemberId: uint64(s.ID()), - Revision: s.KV().Rev(), - RaftTerm: s.Term(), - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go deleted file mode 100644 index 7da4ae45d..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "sync" - - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" -) - -type authApplierV3 struct { - applierV3 - as auth.AuthStore - - // mu serializes Apply so that user isn't corrupted and so that - // serialized requests don't leak data from TOCTOU errors - mu sync.Mutex - - authInfo auth.AuthInfo -} - -func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 { - return &authApplierV3{applierV3: base, as: as} -} - -func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { - aa.mu.Lock() - defer aa.mu.Unlock() - if r.Header != nil { - // backward-compatible with pre-3.0 releases when internalRaftRequest - // does not have header field - aa.authInfo.Username = r.Header.Username - aa.authInfo.Revision = r.Header.AuthRevision - } - if needAdminPermission(r) { - if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return &applyResult{err: err} - } - } - ret := aa.applierV3.Apply(r) - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return ret -} - -func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { - return nil, err - } - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) - if err != nil { - return nil, err - } - } - return aa.applierV3.Put(txn, r) -} - -func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - return aa.applierV3.Range(txn, r) -} - -func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd) - if err != nil { - return nil, err - } - } - - return aa.applierV3.DeleteRange(txn, r) -} - -func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - switch tv := requ.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange == nil { - continue - } - - if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { - return err - } - - case *pb.RequestOp_RequestPut: - if tv.RequestPut == nil { - continue - } - - if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { - return err - } - - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange == nil { - continue - } - - if tv.RequestDeleteRange.PrevKv { - err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - - err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - } - - return nil -} - -func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { - for _, c := range rt.Compare { - if err := as.IsRangePermitted(ai, c.Key, nil); err != nil { - return err - } - } - if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { - return err - } - if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil { - return err - } - return nil -} - -func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, 
error) { - if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil { - return nil, err - } - return aa.applierV3.Txn(rt) -} - -func needAdminPermission(r *pb.InternalRaftRequest) bool { - switch { - case r.AuthEnable != nil: - return true - case r.AuthDisable != nil: - return true - case r.AuthUserAdd != nil: - return true - case r.AuthUserDelete != nil: - return true - case r.AuthUserChangePassword != nil: - return true - case r.AuthUserGrantRole != nil: - return true - case r.AuthUserGet != nil: - return true - case r.AuthUserRevokeRole != nil: - return true - case r.AuthRoleAdd != nil: - return true - case r.AuthRoleGrantPermission != nil: - return true - case r.AuthRoleGet != nil: - return true - case r.AuthRoleRevokePermission != nil: - return true - case r.AuthRoleDelete != nil: - return true - case r.AuthUserList != nil: - return true - case r.AuthRoleList != nil: - return true - default: - return false - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go deleted file mode 100644 index f278efca8..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "encoding/json" - "path" - "time" - - "github.com/coreos/etcd/etcdserver/api" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/store" - "github.com/coreos/go-semver/semver" -) - -// ApplierV2 is the interface for processing V2 raft messages -type ApplierV2 interface { - Delete(r *pb.Request) Response - Post(r *pb.Request) Response - Put(r *pb.Request) Response - QGet(r *pb.Request) Response - Sync(r *pb.Request) Response -} - -func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 { - return &applierV2store{store: s, cluster: c} -} - -type applierV2store struct { - store store.Store - cluster *membership.RaftCluster -} - -func (a *applierV2store) Delete(r *pb.Request) Response { - switch { - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex)) - default: - return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive)) - } -} - -func (a *applierV2store) Post(r *pb.Request) Response { - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r))) -} - -func (a *applierV2store) Put(r *pb.Request) Response { - ttlOptions := toTTLOptions(r) - exists, existsSet := pbutil.GetBool(r.PrevExist) - switch { - case existsSet: - if exists { - if r.PrevIndex == 0 && r.PrevValue == "" { - return toResponse(a.store.Update(r.Path, r.Val, ttlOptions)) - } - return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) - } - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions)) - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) - default: - if storeMemberAttributeRegexp.MatchString(r.Path) { - id := membership.MustParseMemberIDFromKey(path.Dir(r.Path)) - var attr membership.Attributes - if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { - plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) - } - if a.cluster != nil { - a.cluster.UpdateAttributes(id, attr) - } - // return an empty response since there is no consumer. - return Response{} - } - if r.Path == membership.StoreClusterVersionKey() { - if a.cluster != nil { - a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability) - } - // return an empty response since there is no consumer. 
- return Response{} - } - return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions)) - } -} - -func (a *applierV2store) QGet(r *pb.Request) Response { - return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted)) -} - -func (a *applierV2store) Sync(r *pb.Request) Response { - a.store.DeleteExpiredKeys(time.Unix(0, r.Time)) - return Response{} -} - -// applyV2Request interprets r as a call to store.X and returns a Response interpreted -// from store.Event -func (s *EtcdServer) applyV2Request(r *pb.Request) Response { - toTTLOptions(r) - switch r.Method { - case "POST": - return s.applyV2.Post(r) - case "PUT": - return s.applyV2.Put(r) - case "DELETE": - return s.applyV2.Delete(r) - case "QGET": - return s.applyV2.QGet(r) - case "SYNC": - return s.applyV2.Sync(r) - default: - // This should never be reached, but just in case: - return Response{err: ErrUnknownMethod} - } -} - -func toTTLOptions(r *pb.Request) store.TTLOptionSet { - refresh, _ := pbutil.GetBool(r.Refresh) - ttlOptions := store.TTLOptionSet{Refresh: refresh} - if r.Expiration != 0 { - ttlOptions.ExpireTime = time.Unix(0, r.Expiration) - } - return ttlOptions -} - -func toResponse(ev *store.Event, err error) Response { - return Response{Event: ev, err: err} -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go deleted file mode 100644 index c5e2dabf3..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/backend.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "fmt" - "os" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" -) - -func newBackend(cfg *ServerConfig) backend.Backend { - bcfg := backend.DefaultBackendConfig() - bcfg.Path = cfg.backendPath() - if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { - // permit 10% excess over quota for disarm - bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) - } - return backend.New(bcfg) -} - -// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. -func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { - snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) - if err != nil { - return nil, fmt.Errorf("database snapshot file path error: %v", err) - } - if err := os.Rename(snapPath, cfg.backendPath()); err != nil { - return nil, fmt.Errorf("rename snapshot file error: %v", err) - } - return openBackend(cfg), nil -} - -// openBackend returns a backend using the current etcd db. 
-func openBackend(cfg *ServerConfig) backend.Backend { - fn := cfg.backendPath() - beOpened := make(chan backend.Backend) - go func() { - beOpened <- newBackend(cfg) - }() - select { - case be := <-beOpened: - return be - case <-time.After(time.Second): - plog.Warningf("another etcd process is using %q and holds the file lock.", fn) - plog.Warningf("waiting for it to exit before starting...") - } - return <-beOpened -} - -// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes -// before updating the backend db after persisting raft snapshot to disk, -// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this -// case, replace the db with the snapshot db sent by the leader. -func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { - var cIndex consistentIndex - kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) - defer kv.Close() - if snapshot.Metadata.Index <= kv.ConsistentIndex() { - return oldbe, nil - } - oldbe.Close() - return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go deleted file mode 100644 index f44862a46..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "sort" - "time" - - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" -) - -// isMemberBootstrapped tries to check if the given member has been bootstrapped -// in the given cluster. -func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { - rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt) - if err != nil { - return false - } - id := cl.MemberByName(member).ID - m := rcl.Member(id) - if m == nil { - return false - } - if len(m.ClientURLs) > 0 { - return true - } - return false -} - -// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and -// attempts to construct a Cluster by accessing the members endpoint on one of -// these URLs. The first URL to provide a response is used. If no URLs provide -// a response, or a Cluster cannot be successfully created from a received -// response, an error is returned. -// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, -// 10 second is enough for building connection and finishing request. -func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { - return getClusterFromRemotePeers(urls, 10*time.Second, true, rt) -} - -// If logerr is true, it prints out more error messages. 
-func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { - cc := &http.Client{ - Transport: rt, - Timeout: timeout, - } - for _, u := range urls { - resp, err := cc.Get(u + "/members") - if err != nil { - if logerr { - plog.Warningf("could not get cluster response from %s: %v", u, err) - } - continue - } - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - if logerr { - plog.Warningf("could not read the body of cluster response: %v", err) - } - continue - } - var membs []*membership.Member - if err = json.Unmarshal(b, &membs); err != nil { - if logerr { - plog.Warningf("could not unmarshal cluster response: %v", err) - } - continue - } - id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) - if err != nil { - if logerr { - plog.Warningf("could not parse the cluster ID from cluster res: %v", err) - } - continue - } - - // check the length of membership members - // if the membership members are present then prepare and return raft cluster - // if membership members are not present then the raft cluster formed will be - // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error - if len(membs) > 0 { - return membership.NewClusterFromMembers("", id, membs), nil - } - - return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.") - } - return nil, fmt.Errorf("could not retrieve cluster information from the given urls") -} - -// getRemotePeerURLs returns peer urls of remote members in the cluster. The -// returned list is sorted in ascending lexicographical order. -func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { - us := make([]string, 0) - for _, m := range cl.Members() { - if m.Name == local { - continue - } - us = append(us, m.PeerURLs...) - } - sort.Strings(us) - return us -} - -// getVersions returns the versions of the members in the given cluster. -// The key of the returned map is the member's ID. The value of the returned map -// is the semver versions string, including server and cluster. -// If it fails to get the version of a member, the key will be nil. -func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { - members := cl.Members() - vers := make(map[string]*version.Versions) - for _, m := range members { - if m.ID == local { - cv := "not_decided" - if cl.Version() != nil { - cv = cl.Version().String() - } - vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} - continue - } - ver, err := getVersion(m, rt) - if err != nil { - plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) - vers[m.ID.String()] = nil - } else { - vers[m.ID.String()] = ver - } - } - return vers -} - -// decideClusterVersion decides the cluster version based on the versions map. -// The returned version is the min server version in the map, or nil if the min -// version in unknown. 
-func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { - var cv *semver.Version - lv := semver.Must(semver.NewVersion(version.Version)) - - for mid, ver := range vers { - if ver == nil { - return nil - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - plog.Errorf("cannot understand the version of member %s (%v)", mid, err) - return nil - } - if lv.LessThan(*v) { - plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) - plog.Warningf("member %s has a higher version %s", mid, ver.Server) - } - if cv == nil { - cv = v - } else if v.LessThan(*cv) { - cv = v - } - } - return cv -} - -// isCompatibleWithCluster return true if the local member has a compatible version with -// the current running cluster. -// The version is considered as compatible when at least one of the other members in the cluster has a -// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version -// out of the range. -// We set this rule since when the local member joins, another member might be offline. -func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { - vers := getVersions(cl, local, rt) - minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) - maxV := semver.Must(semver.NewVersion(version.Version)) - maxV = &semver.Version{ - Major: maxV.Major, - Minor: maxV.Minor, - } - - return isCompatibleWithVers(vers, local, minV, maxV) -} - -func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { - var ok bool - for id, v := range vers { - // ignore comparison with local version - if id == local.String() { - continue - } - if v == nil { - continue - } - clusterv, err := semver.NewVersion(v.Cluster) - if err != nil { - plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) - continue - } - if clusterv.LessThan(*minV) { - plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) - return false - } - if maxV.LessThan(*clusterv) { - plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) - return false - } - ok = true - } - return ok -} - -// getVersion returns the Versions of the given member via its -// peerURLs. Returns the last error if it fails to get the version. 
-func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { - cc := &http.Client{ - Transport: rt, - } - var ( - err error - resp *http.Response - ) - - for _, u := range m.PeerURLs { - resp, err = cc.Get(u + "/version") - if err != nil { - plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - var b []byte - b, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - var vers version.Versions - if err = json.Unmarshal(b, &vers); err != nil { - plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - return &vers, nil - } - return nil, err -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go deleted file mode 100644 index 9c258934a..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "fmt" - "path/filepath" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" -) - -// ServerConfig holds the configuration of etcd as taken from the command line or discovery. -type ServerConfig struct { - Name string - DiscoveryURL string - DiscoveryProxy string - ClientURLs types.URLs - PeerURLs types.URLs - DataDir string - // DedicatedWALDir config will make the etcd to write the WAL to the WALDir - // rather than the dataDir/member/wal. - DedicatedWALDir string - SnapCount uint64 - MaxSnapFiles uint - MaxWALFiles uint - InitialPeerURLsMap types.URLsMap - InitialClusterToken string - NewCluster bool - ForceNewCluster bool - PeerTLSInfo transport.TLSInfo - - TickMs uint - ElectionTicks int - BootstrapTimeout time.Duration - - AutoCompactionRetention int - QuotaBackendBytes int64 - - StrictReconfigCheck bool - - // ClientCertAuthEnabled is true when cert has been signed by the client CA. - ClientCertAuthEnabled bool - - AuthToken string -} - -// VerifyBootstrap sanity-checks the initial config for bootstrap case -// and returns an error for things that should never happen. 
-func (c *ServerConfig) VerifyBootstrap() error { - if err := c.hasLocalMember(); err != nil { - return err - } - if err := c.advertiseMatchesCluster(); err != nil { - return err - } - if checkDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" { - return fmt.Errorf("initial cluster unset and no discovery URL found") - } - return nil -} - -// VerifyJoinExisting sanity-checks the initial config for join existing cluster -// case and returns an error for things that should never happen. -func (c *ServerConfig) VerifyJoinExisting() error { - // The member has announced its peer urls to the cluster before starting; no need to - // set the configuration again. - if err := c.hasLocalMember(); err != nil { - return err - } - if checkDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.DiscoveryURL != "" { - return fmt.Errorf("discovery URL should not be set when joining existing initial cluster") - } - return nil -} - -// hasLocalMember checks that the cluster at least contains the local server. -func (c *ServerConfig) hasLocalMember() error { - if urls := c.InitialPeerURLsMap[c.Name]; urls == nil { - return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name) - } - return nil -} - -// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list. -func (c *ServerConfig) advertiseMatchesCluster() error { - urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice() - urls.Sort() - sort.Strings(apurls) - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) { - umap := map[string]types.URLs{c.Name: c.PeerURLs} - return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ",")) - } - return nil -} - -func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") } - -func (c *ServerConfig) WALDir() string { - if c.DedicatedWALDir != "" { - return c.DedicatedWALDir - } - return filepath.Join(c.MemberDir(), "wal") -} - -func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") } - -func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } - -// ReqTimeout returns timeout for request to finish. 
-func (c *ServerConfig) ReqTimeout() time.Duration { - // 5s for queue waiting, computation and disk IO delay - // + 2 * election timeout for possible leader election - return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond -} - -func (c *ServerConfig) electionTimeout() time.Duration { - return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond -} - -func (c *ServerConfig) peerDialTimeout() time.Duration { - // 1s for queue wait and system delay - // + one RTT, which is smaller than 1/5 election timeout - return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5 -} - -func (c *ServerConfig) PrintWithInitial() { c.print(true) } - -func (c *ServerConfig) Print() { c.print(false) } - -func (c *ServerConfig) print(initial bool) { - plog.Infof("name = %s", c.Name) - if c.ForceNewCluster { - plog.Infof("force new cluster") - } - plog.Infof("data dir = %s", c.DataDir) - plog.Infof("member dir = %s", c.MemberDir()) - if c.DedicatedWALDir != "" { - plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) - } - plog.Infof("heartbeat = %dms", c.TickMs) - plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) - plog.Infof("snapshot count = %d", c.SnapCount) - if len(c.DiscoveryURL) != 0 { - plog.Infof("discovery URL= %s", c.DiscoveryURL) - if len(c.DiscoveryProxy) != 0 { - plog.Infof("discovery proxy = %s", c.DiscoveryProxy) - } - } - plog.Infof("advertise client URLs = %s", c.ClientURLs) - if initial { - plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) - plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) - } -} - -func checkDuplicateURL(urlsmap types.URLsMap) bool { - um := make(map[string]bool) - for _, urls := range urlsmap { - for _, url := range urls { - u := url.String() - if um[u] { - return true - } - um[u] = true - } - } - return false -} - -func (c *ServerConfig) bootstrapTimeout() time.Duration { - if c.BootstrapTimeout != 0 { - return c.BootstrapTimeout - } - return time.Second -} - -func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go deleted file mode 100644 index d513f6708..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "sync/atomic" -) - -// consistentIndex represents the offset of an entry in a consistent replica log. -// It implements the mvcc.ConsistentIndexGetter interface. -// It is always set to the offset of current entry before executing the entry, -// so ConsistentWatchableKV could get the consistent index from it. 
-type consistentIndex uint64 - -func (i *consistentIndex) setConsistentIndex(v uint64) { - atomic.StoreUint64((*uint64)(i), v) -} - -func (i *consistentIndex) ConsistentIndex() uint64 { - return atomic.LoadUint64((*uint64)(i)) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/github.com/coreos/etcd/etcdserver/doc.go deleted file mode 100644 index b195d2d16..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package etcdserver defines how etcd servers interact and store their states. -package etcdserver diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go deleted file mode 100644 index ed749dbe8..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "errors" - "fmt" -) - -var ( - ErrUnknownMethod = errors.New("etcdserver: unknown method") - ErrStopped = errors.New("etcdserver: server stopped") - ErrCanceled = errors.New("etcdserver: request cancelled") - ErrTimeout = errors.New("etcdserver: request timed out") - ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") - ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") - ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") - ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") - ErrNoLeader = errors.New("etcdserver: no leader") - ErrRequestTooLarge = errors.New("etcdserver: request is too large") - ErrNoSpace = errors.New("etcdserver: no space") - ErrTooManyRequests = errors.New("etcdserver: too many requests") - ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") - ErrKeyNotFound = errors.New("etcdserver: key not found") -) - -type DiscoveryError struct { - Op string - Err error -} - -func (e DiscoveryError) Error() string { - return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go deleted file mode 100644 index 90bbd3632..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "time" - - "github.com/coreos/etcd/pkg/runtime" - "github.com/prometheus/client_golang/prometheus" -) - -var ( - hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "has_leader", - Help: "Whether or not a leader exists. 
1 is existence, 0 is not.", - }) - leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "leader_changes_seen_total", - Help: "The number of leader changes seen.", - }) - proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_committed_total", - Help: "The total number of consensus proposals committed.", - }) - proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_applied_total", - Help: "The total number of consensus proposals applied.", - }) - proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_pending", - Help: "The current number of pending proposals to commit.", - }) - proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_failed_total", - Help: "The total number of failed proposals seen.", - }) - leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "server", - Name: "lease_expired_total", - Help: "The total number of expired leases.", - }) -) - -func init() { - prometheus.MustRegister(hasLeader) - prometheus.MustRegister(leaderChanges) - prometheus.MustRegister(proposalsCommitted) - prometheus.MustRegister(proposalsApplied) - prometheus.MustRegister(proposalsPending) - prometheus.MustRegister(proposalsFailed) - prometheus.MustRegister(leaseExpired) -} - -func monitorFileDescriptor(done <-chan struct{}) { - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - for { - used, err := runtime.FDUsage() - if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) - return - } - limit, err := runtime.FDLimit() - if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) - return - } - if used >= limit/5*4 { - plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit) - } - select { - case <-ticker.C: - case <-done: - return - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go deleted file mode 100644 index 87126f156..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB -) - -// Quota represents an arbitrary quota against arbitrary requests. 
Each request -// costs some charge; if there is not enough remaining charge, then there are -// too few resources available within the quota to apply the request. -type Quota interface { - // Available judges whether the given request fits within the quota. - Available(req interface{}) bool - // Cost computes the charge against the quota for a given request. - Cost(req interface{}) int - // Remaining is the amount of charge left for the quota. - Remaining() int64 -} - -type passthroughQuota struct{} - -func (*passthroughQuota) Available(interface{}) bool { return true } -func (*passthroughQuota) Cost(interface{}) int { return 0 } -func (*passthroughQuota) Remaining() int64 { return 1 } - -type backendQuota struct { - s *EtcdServer - maxBackendBytes int64 -} - -const ( - // leaseOverhead is an estimate for the cost of storing a lease - leaseOverhead = 64 - // kvOverhead is an estimate for the cost of storing a key's metadata - kvOverhead = 256 -) - -func NewBackendQuota(s *EtcdServer) Quota { - if s.Cfg.QuotaBackendBytes < 0 { - // disable quotas if negative - plog.Warningf("disabling backend quota") - return &passthroughQuota{} - } - if s.Cfg.QuotaBackendBytes == 0 { - // use default size if no quota size given - return &backendQuota{s, DefaultQuotaBytes} - } - if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) - } - return &backendQuota{s, s.Cfg.QuotaBackendBytes} -} - -func (b *backendQuota) Available(v interface{}) bool { - // TODO: maybe optimize backend.Size() - return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes -} - -func (b *backendQuota) Cost(v interface{}) int { - switch r := v.(type) { - case *pb.PutRequest: - return costPut(r) - case *pb.TxnRequest: - return costTxn(r) - case *pb.LeaseGrantRequest: - return leaseOverhead - default: - panic("unexpected cost") - } -} - -func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) } - -func costTxnReq(u *pb.RequestOp) int { - r := u.GetRequestPut() - if r == nil { - return 0 - } - return costPut(r) -} - -func costTxn(r *pb.TxnRequest) int { - sizeSuccess := 0 - for _, u := range r.Success { - sizeSuccess += costTxnReq(u) - } - sizeFailure := 0 - for _, u := range r.Failure { - sizeFailure += costTxnReq(u) - } - if sizeFailure > sizeSuccess { - return sizeFailure - } - return sizeSuccess -} - -func (b *backendQuota) Remaining() int64 { - return b.maxBackendBytes - b.s.Backend().Size() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go deleted file mode 100644 index dcb894f82..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "encoding/json" - "expvar" - "sort" - "sync" - "sync/atomic" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/contention" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" - "github.com/coreos/pkg/capnslog" -) - -const ( - // Number of entries for slow follower to catch-up after compacting - // the raft storage entries. - // We expect the follower has a millisecond level latency with the leader. - // The max throughput is around 10K. Keep a 5K entries is enough for helping - // follower to catch up. - numberOfCatchUpEntries = 5000 - - // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value). - // Assuming the RTT is around 10ms, 1MB max size is large enough. - maxSizePerMsg = 1 * 1024 * 1024 - // Never overflow the rafthttp buffer, which is 4096. - // TODO: a better const? - maxInflightMsgs = 4096 / 8 -) - -var ( - // protects raftStatus - raftStatusMu sync.Mutex - // indirection for expvar func interface - // expvar panics when publishing duplicate name - // expvar does not support remove a registered name - // so only register a func that calls raftStatus - // and change raftStatus as we need. - raftStatus func() raft.Status -) - -func init() { - raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft")) - expvar.Publish("raft.status", expvar.Func(func() interface{} { - raftStatusMu.Lock() - defer raftStatusMu.Unlock() - return raftStatus() - })) -} - -type RaftTimer interface { - Index() uint64 - Term() uint64 -} - -// apply contains entries, snapshot to be applied. Once -// an apply is consumed, the entries will be persisted to -// to raft storage concurrently; the application must read -// raftDone before assuming the raft messages are stable. -type apply struct { - entries []raftpb.Entry - snapshot raftpb.Snapshot - // notifyc synchronizes etcd server applies with the raft node - notifyc chan struct{} -} - -type raftNode struct { - // Cache of the latest raft index and raft term the server has seen. - // These three unit64 fields must be the first elements to keep 64-bit - // alignment for atomic access to the fields. - index uint64 - term uint64 - lead uint64 - - raftNodeConfig - - // a chan to send/receive snapshot - msgSnapC chan raftpb.Message - - // a chan to send out apply - applyc chan apply - - // a chan to send out readState - readStateC chan raft.ReadState - - // utility - ticker *time.Ticker - // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - - stopped chan struct{} - done chan struct{} -} - -type raftNodeConfig struct { - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - raft.Node - raftStorage *raft.MemoryStorage - storage Storage - heartbeat time.Duration // for logging - // transport specifies the transport to send and receive msgs to members. - // Sending messages MUST NOT block. It is okay to drop messages, since - // clients should timeout and reissue their messages. - // If transport is nil, server will panic. - transport rafthttp.Transporter -} - -func newRaftNode(cfg raftNodeConfig) *raftNode { - r := &raftNode{ - raftNodeConfig: cfg, - // set up contention detectors for raft heartbeat message. 
- // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * cfg.heartbeat), - readStateC: make(chan raft.ReadState, 1), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - applyc: make(chan apply), - stopped: make(chan struct{}), - done: make(chan struct{}), - } - if r.heartbeat == 0 { - r.ticker = &time.Ticker{} - } else { - r.ticker = time.NewTicker(r.heartbeat) - } - return r -} - -// start prepares and starts raftNode in a new goroutine. It is no longer safe -// to modify the fields after it has been started. -func (r *raftNode) start(rh *raftReadyHandler) { - internalTimeout := time.Second - - go func() { - defer r.onStop() - islead := false - - for { - select { - case <-r.ticker.C: - r.Tick() - case rd := <-r.Ready(): - if rd.SoftState != nil { - newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead - if newLeader { - leaderChanges.Inc() - } - - if rd.SoftState.Lead == raft.None { - hasLeader.Set(0) - } else { - hasLeader.Set(1) - } - - atomic.StoreUint64(&r.lead, rd.SoftState.Lead) - islead = rd.RaftState == raft.StateLeader - rh.updateLeadership(newLeader) - r.td.Reset() - } - - if len(rd.ReadStates) != 0 { - select { - case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: - case <-time.After(internalTimeout): - plog.Warningf("timed out sending read state") - case <-r.stopped: - return - } - } - - notifyc := make(chan struct{}, 1) - ap := apply{ - entries: rd.CommittedEntries, - snapshot: rd.Snapshot, - notifyc: notifyc, - } - - updateCommittedIndex(&ap, rh) - - select { - case r.applyc <- ap: - case <-r.stopped: - return - } - - // the leader can write to its disk in parallel with replicating to the followers and them - // writing to their disks. - // For more details, check raft thesis 10.2.1 - if islead { - // gofail: var raftBeforeLeaderSend struct{} - r.transport.Send(r.processMessages(rd.Messages)) - } - - // gofail: var raftBeforeSave struct{} - if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - plog.Fatalf("raft save state and entries error: %v", err) - } - if !raft.IsEmptyHardState(rd.HardState) { - proposalsCommitted.Set(float64(rd.HardState.Commit)) - } - // gofail: var raftAfterSave struct{} - - if !raft.IsEmptySnap(rd.Snapshot) { - // gofail: var raftBeforeSaveSnap struct{} - if err := r.storage.SaveSnap(rd.Snapshot); err != nil { - plog.Fatalf("raft save snapshot error: %v", err) - } - // etcdserver now claim the snapshot has been persisted onto the disk - notifyc <- struct{}{} - - // gofail: var raftAfterSaveSnap struct{} - r.raftStorage.ApplySnapshot(rd.Snapshot) - plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) - // gofail: var raftAfterApplySnap struct{} - } - - r.raftStorage.Append(rd.Entries) - - if !islead { - // finish processing incoming messages before we signal raftdone chan - msgs := r.processMessages(rd.Messages) - - // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots - notifyc <- struct{}{} - - // Candidate or follower needs to wait for all pending configuration - // changes to be applied before sending messages. - // Otherwise we might incorrectly count votes (e.g. votes from removed members). - // Also slow machine's follower raft-layer could proceed to become the leader - // on its own single-node cluster, before apply-layer applies the config change. - // We simply wait for ALL pending entries to be applied for now. 
- // We might improve this later on if it causes unnecessary long blocking issues. - waitApply := false - for _, ent := range rd.CommittedEntries { - if ent.Type == raftpb.EntryConfChange { - waitApply = true - break - } - } - if waitApply { - // blocks until 'applyAll' calls 'applyWait.Trigger' - // to be in sync with scheduled config-change job - // (assume notifyc has cap of 1) - select { - case notifyc <- struct{}{}: - case <-r.stopped: - return - } - } - - // gofail: var raftBeforeFollowerSend struct{} - r.transport.Send(msgs) - } else { - // leader already processed 'MsgSnap' and signaled - notifyc <- struct{}{} - } - - r.Advance() - case <-r.stopped: - return - } - } - }() -} - -func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { - var ci uint64 - if len(ap.entries) != 0 { - ci = ap.entries[len(ap.entries)-1].Index - } - if ap.snapshot.Metadata.Index > ci { - ci = ap.snapshot.Metadata.Index - } - if ci != 0 { - rh.updateCommittedIndex(ci) - } -} - -func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { - sentAppResp := false - for i := len(ms) - 1; i >= 0; i-- { - if r.isIDRemoved(ms[i].To) { - ms[i].To = 0 - } - - if ms[i].Type == raftpb.MsgAppResp { - if sentAppResp { - ms[i].To = 0 - } else { - sentAppResp = true - } - } - - if ms[i].Type == raftpb.MsgSnap { - // There are two separate data store: the store for v2, and the KV for v3. - // The msgSnap only contains the most recent snapshot of store without KV. - // So we need to redirect the msgSnap to etcd server main loop for merging in the - // current store snapshot and KV snapshot. - select { - case r.msgSnapC <- ms[i]: - default: - // drop msgSnap if the inflight chan if full. - } - ms[i].To = 0 - } - if ms[i].Type == raftpb.MsgHeartbeat { - ok, exceed := r.td.Observe(ms[i].To) - if !ok { - // TODO: limit request rate. - plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed) - plog.Warningf("server is likely overloaded") - } - } - } - return ms -} - -func (r *raftNode) apply() chan apply { - return r.applyc -} - -func (r *raftNode) stop() { - r.stopped <- struct{}{} - <-r.done -} - -func (r *raftNode) onStop() { - r.Stop() - r.ticker.Stop() - r.transport.Stop() - if err := r.storage.Close(); err != nil { - plog.Panicf("raft close storage error: %v", err) - } - close(r.done) -} - -// for testing -func (r *raftNode) pauseSending() { - p := r.transport.(rafthttp.Pausable) - p.Pause() -} - -func (r *raftNode) resumeSending() { - p := r.transport.(rafthttp.Pausable) - p.Resume() -} - -// advanceTicksForElection advances ticks to the node for fast election. -// This reduces the time to wait for first leader election if bootstrapping the whole -// cluster, while leaving at least 1 heartbeat for possible existing leader -// to contact it. 
-func advanceTicksForElection(n raft.Node, electionTicks int) { - for i := 0; i < electionTicks-1; i++ { - n.Tick() - } -} - -func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { - var err error - member := cl.MemberByName(cfg.Name) - metadata := pbutil.MustMarshal( - &pb.Metadata{ - NodeID: uint64(member.ID), - ClusterID: uint64(cl.ID()), - }, - ) - if w, err = wal.Create(cfg.WALDir(), metadata); err != nil { - plog.Fatalf("create wal error: %v", err) - } - peers := make([]raft.Peer, len(ids)) - for i, id := range ids { - ctx, err := json.Marshal((*cl).Member(id)) - if err != nil { - plog.Panicf("marshal member should never fail: %v", err) - } - peers[i] = raft.Peer{ID: uint64(id), Context: ctx} - } - id = member.ID - plog.Infof("starting member %s in cluster %s", id, cl.ID()) - s = raft.NewMemoryStorage() - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - } - - n = raft.StartNode(c, peers) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) - return -} - -func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) - - plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - } - - n := raft.RestartNode(c) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) - return id, cl, n, s, w -} - -func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) - - // discard the previously uncommitted entries - for i, ent := range ents { - if ent.Index > st.Commit { - plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) - ents = ents[:i] - break - } - } - - // force append the configuration change entries - toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit) - ents = append(ents, toAppEnts...) 
- - // force commit newly appended entries - err := w.Save(raftpb.HardState{}, toAppEnts) - if err != nil { - plog.Fatalf("%v", err) - } - if len(ents) != 0 { - st.Commit = ents[len(ents)-1].Index - } - - plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - } - n := raft.RestartNode(c) - raftStatus = n.Status - return id, cl, n, s, w -} - -// getIDs returns an ordered set of IDs included in the given snapshot and -// the entries. The given snapshot/entries can contain two kinds of -// ID-related entry: -// - ConfChangeAddNode, in which case the contained ID will be added into the set. -// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. -func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { - ids := make(map[uint64]bool) - if snap != nil { - for _, id := range snap.Metadata.ConfState.Nodes { - ids[id] = true - } - } - for _, e := range ents { - if e.Type != raftpb.EntryConfChange { - continue - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - switch cc.Type { - case raftpb.ConfChangeAddNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeRemoveNode: - delete(ids, cc.NodeID) - case raftpb.ConfChangeUpdateNode: - // do nothing - default: - plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") - } - } - sids := make(types.Uint64Slice, 0, len(ids)) - for id := range ids { - sids = append(sids, id) - } - sort.Sort(sids) - return []uint64(sids) -} - -// createConfigChangeEnts creates a series of Raft entries (i.e. -// EntryConfChange) to remove the set of given IDs from the cluster. The ID -// `self` is _not_ removed, even if present in the set. -// If `self` is not inside the given ids, it creates a Raft entry to add a -// default member with the given `self`. 
-func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { - ents := make([]raftpb.Entry, 0) - next := index + 1 - found := false - for _, id := range ids { - if id == self { - found = true - continue - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - if !found { - m := membership.Member{ - ID: types.ID(self), - RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, - } - ctx, err := json.Marshal(m) - if err != nil { - plog.Panicf("marshal member should never fail: %v", err) - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: self, - Context: ctx, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - } - return ents -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go deleted file mode 100644 index 38a96f719..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ /dev/null @@ -1,1659 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "expvar" - "fmt" - "math" - "math/rand" - "net/http" - "os" - "path" - "regexp" - "sync" - "sync/atomic" - "time" - - "github.com/coreos/etcd/alarm" - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/compactor" - "github.com/coreos/etcd/discovery" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/idutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/runtime" - "github.com/coreos/etcd/pkg/schedule" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/pkg/wait" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/etcd/wal" - "github.com/coreos/go-semver/semver" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -const ( - DefaultSnapCount = 100000 - - StoreClusterPrefix = "/0" - StoreKeysPrefix = "/1" - - // HealthInterval is the minimum time the cluster should be healthy - // before accepting add member requests. - HealthInterval = 5 * time.Second - - purgeFileInterval = 30 * time.Second - // monitorVersionInterval should be smaller than the timeout - // on the connection. 
Or we will not be able to reuse the connection - // (since it will timeout). - monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - - // max number of in-flight snapshot messages etcdserver allows to have - // This number is more than enough for most clusters with 5 machines. - maxInFlightMsgSnap = 16 - - releaseDelayAfterSnapshot = 30 * time.Second - - // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver") - - storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) -) - -func init() { - rand.Seed(time.Now().UnixNano()) - - expvar.Publish( - "file_descriptor_limit", - expvar.Func( - func() interface{} { - n, _ := runtime.FDLimit() - return n - }, - ), - ) -} - -type Response struct { - Event *store.Event - Watcher store.Watcher - err error -} - -type Server interface { - // Start performs any initialization of the Server necessary for it to - // begin serving requests. It must be called before Do or Process. - // Start must be non-blocking; any long-running server functionality - // should be implemented in goroutines. - Start() - // Stop terminates the Server and performs any necessary finalization. - // Do and Process cannot be called after Stop has been invoked. - Stop() - // ID returns the ID of the Server. - ID() types.ID - // Leader returns the ID of the leader Server. - Leader() types.ID - // Do takes a request and attempts to fulfill it, returning a Response. - Do(ctx context.Context, r pb.Request) (Response, error) - // Process takes a raft message and applies it to the server's raft state - // machine, respecting any timeout of the given context. - Process(ctx context.Context, m raftpb.Message) error - // AddMember attempts to add a member into the cluster. It will return - // ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDExists if member ID exists in the cluster. - AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) - // RemoveMember attempts to remove a member from the cluster. It will - // return ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDNotFound if member ID is not in the cluster. - RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) - - // UpdateMember attempts to update an existing member in the cluster. It will - // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) - - // ClusterVersion is the cluster-wide minimum major.minor version. - // Cluster version is set to the min version that an etcd member is - // compatible with when first bootstrap. - // - // ClusterVersion is nil until the cluster is bootstrapped (has a quorum). - // - // During a rolling upgrades, the ClusterVersion will be updated - // automatically after a sync. (5 second by default) - // - // The API/raft component can utilize ClusterVersion to determine if - // it can accept a client request or a raft RPC. - // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and - // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since - // this feature is introduced post 2.0. 
- ClusterVersion() *semver.Version -} - -// EtcdServer is the production implementation of the Server interface -type EtcdServer struct { - // inflightSnapshots holds count the number of snapshots currently inflight. - inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. - appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - // consistIndex used to hold the offset of current executing entry - // It is initialized to 0 before executing any entry. - consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. - Cfg *ServerConfig - - readych chan struct{} - r raftNode - - snapCount uint64 - - w wait.Wait - - readMu sync.RWMutex - // read routine notifies etcd server that it waits for reading by sending an empty struct to - // readwaitC - readwaitc chan struct{} - // readNotifier is used to notify the read routine that it can process the request - // when there is no error - readNotifier *notifier - - // stop signals the run goroutine should shutdown. - stop chan struct{} - // stopping is closed by run goroutine on shutdown. - stopping chan struct{} - // done is closed when all goroutines from start() complete. - done chan struct{} - - errorc chan error - id types.ID - attributes membership.Attributes - - cluster *membership.RaftCluster - - store store.Store - snapshotter *snap.Snapshotter - - applyV2 ApplierV2 - - // applyV3 is the applier with auth and quotas - applyV3 applierV3 - // applyV3Base is the core applier without auth or quotas - applyV3Base applierV3 - applyWait wait.WaitTime - - kv mvcc.ConsistentWatchableKV - lessor lease.Lessor - bemu sync.Mutex - be backend.Backend - authStore auth.AuthStore - alarmStore *alarm.AlarmStore - - stats *stats.ServerStats - lstats *stats.LeaderStats - - SyncTicker *time.Ticker - // compactor is used to auto-compact the KV. - compactor *compactor.Periodic - - // peerRt used to send requests (version, lease) to peers. - peerRt http.RoundTripper - reqIDGen *idutil.Generator - - // forceVersionC is used to force the version monitor loop - // to detect the cluster version immediately. - forceVersionC chan struct{} - - // wgMu blocks concurrent waitgroup mutation while server stopping - wgMu sync.RWMutex - // wg is used to wait for the go routines that depends on the server state - // to exit when stopping the server. - wg sync.WaitGroup - - // ctx is used for etcd-initiated requests that may need to be canceled - // on etcd server shutdown. - ctx context.Context - cancel context.CancelFunc - - leadTimeMu sync.RWMutex - leadElectedTime time.Time -} - -// NewServer creates a new EtcdServer from the supplied configuration. The -// configuration is considered static for the lifetime of the EtcdServer. 
-func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { - st := store.New(StoreClusterPrefix, StoreKeysPrefix) - - var ( - w *wal.WAL - n raft.Node - s *raft.MemoryStorage - id types.ID - cl *membership.RaftCluster - ) - - if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { - return nil, fmt.Errorf("cannot access data directory: %v", terr) - } - - haveWAL := wal.Exist(cfg.WALDir()) - - if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { - plog.Fatalf("create snapshot directory error: %v", err) - } - ss := snap.New(cfg.SnapDir()) - - bepath := cfg.backendPath() - beExist := fileutil.Exist(bepath) - be := openBackend(cfg) - - defer func() { - if err != nil { - be.Close() - } - }() - - prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout()) - if err != nil { - return nil, err - } - var ( - remotes []*membership.Member - snapshot *raftpb.Snapshot - ) - - switch { - case !haveWAL && !cfg.NewCluster: - if err = cfg.VerifyJoinExisting(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt) - if gerr != nil { - return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) - } - if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil { - return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) - } - if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) { - return nil, fmt.Errorf("incompatible with current running cluster") - } - - remotes = existingCluster.Members() - cl.SetID(existingCluster.ID()) - cl.SetStore(st) - cl.SetBackend(be) - cfg.Print() - id, n, s, w = startNode(cfg, cl, nil) - case !haveWAL && cfg.NewCluster: - if err = cfg.VerifyBootstrap(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) { - return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) - } - if cfg.ShouldDiscover() { - var str string - str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) - if err != nil { - return nil, &DiscoveryError{Op: "join", Err: err} - } - var urlsmap types.URLsMap - urlsmap, err = types.NewURLsMap(str) - if err != nil { - return nil, err - } - if checkDuplicateURL(urlsmap) { - return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) - } - if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil { - return nil, err - } - } - cl.SetStore(st) - cl.SetBackend(be) - cfg.PrintWithInitial() - id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) - case haveWAL: - if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { - return nil, fmt.Errorf("cannot write to member directory: %v", err) - } - - if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { - return nil, fmt.Errorf("cannot write to WAL directory: %v", err) - } - - if cfg.ShouldDiscover() { - plog.Warningf("discovery token ignored since a cluster has already been initialized. 
Valid log found at %q", cfg.WALDir()) - } - snapshot, err = ss.Load() - if err != nil && err != snap.ErrNoSnapshot { - return nil, err - } - if snapshot != nil { - if err = st.Recovery(snapshot.Data); err != nil { - plog.Panicf("recovered store from snapshot error: %v", err) - } - plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) - if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { - plog.Panicf("recovering backend from snapshot error: %v", err) - } - } - cfg.Print() - if !cfg.ForceNewCluster { - id, cl, n, s, w = restartNode(cfg, snapshot) - } else { - id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) - } - cl.SetStore(st) - cl.SetBackend(be) - cl.Recover(api.UpdateCapability) - if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist { - os.RemoveAll(bepath) - return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) - } - default: - return nil, fmt.Errorf("unsupported bootstrap config") - } - - if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { - return nil, fmt.Errorf("cannot access member directory: %v", terr) - } - - sstats := stats.NewServerStats(cfg.Name, id.String()) - lstats := stats.NewLeaderStats(id.String()) - - heartbeat := time.Duration(cfg.TickMs) * time.Millisecond - srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - snapshotter: ss, - r: *newRaftNode( - raftNodeConfig{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - }, - ), - id: id, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), - peerRt: prt, - reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), - forceVersionC: make(chan struct{}), - } - - srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} - - srv.be = be - minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds()))) - srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex) - if beExist { - kvindex := srv.kv.ConsistentIndex() - // TODO: remove kvindex != 0 checking when we do not expect users to upgrade - // etcd from pre-3.0 release. 
- if snapshot != nil && kvindex < snapshot.Metadata.Index { - if kvindex != 0 { - return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index) - } - plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) - } - } - newSrv := srv // since srv == nil in defer if srv is returned as nil - defer func() { - // closing backend without first closing kv can cause - // resumed compactions to fail with closed tx errors - if err != nil { - newSrv.kv.Close() - } - }() - - srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) - tp, err := auth.NewTokenProvider(cfg.AuthToken, - func(index uint64) <-chan struct{} { - return srv.applyWait.Wait(index) - }, - ) - if err != nil { - plog.Errorf("failed to create token provider: %s", err) - return nil, err - } - srv.authStore = auth.NewAuthStore(srv.be, tp) - if h := cfg.AutoCompactionRetention; h != 0 { - srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) - srv.compactor.Run() - } - - srv.applyV3Base = &applierV3backend{srv} - if err = srv.restoreAlarms(); err != nil { - return nil, err - } - - // TODO: move transport initialization near the definition of remote - tr := &rafthttp.Transport{ - TLSInfo: cfg.PeerTLSInfo, - DialTimeout: cfg.peerDialTimeout(), - ID: id, - URLs: cfg.PeerURLs, - ClusterID: cl.ID(), - Raft: srv, - Snapshotter: ss, - ServerStats: sstats, - LeaderStats: lstats, - ErrorC: srv.errorc, - } - if err = tr.Start(); err != nil { - return nil, err - } - // add all remotes into transport - for _, m := range remotes { - if m.ID != id { - tr.AddRemote(m.ID, m.PeerURLs) - } - } - for _, m := range cl.Members() { - if m.ID != id { - tr.AddPeer(m.ID, m.PeerURLs) - } - } - srv.r.transport = tr - - return srv, nil -} - -// Start prepares and starts server in a new goroutine. It is no longer safe to -// modify a server's fields after it has been sent to Start. -// It also starts a goroutine to publish its server information. -func (s *EtcdServer) Start() { - s.start() - s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) - s.goAttach(s.purgeFile) - s.goAttach(func() { monitorFileDescriptor(s.stopping) }) - s.goAttach(s.monitorVersions) - s.goAttach(s.linearizableReadLoop) -} - -// start prepares and starts server in a new goroutine. It is no longer safe to -// modify a server's fields after it has been sent to Start. -// This function is just used for testing. -func (s *EtcdServer) start() { - if s.snapCount == 0 { - plog.Infof("set snapshot count to default %d", DefaultSnapCount) - s.snapCount = DefaultSnapCount - } - s.w = wait.New() - s.applyWait = wait.NewTimeList() - s.done = make(chan struct{}) - s.stop = make(chan struct{}) - s.stopping = make(chan struct{}) - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.readwaitc = make(chan struct{}, 1) - s.readNotifier = newNotifier() - if s.ClusterVersion() != nil { - plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) - } else { - plog.Infof("starting server... 
[version: %v, cluster version: to_be_decided]", version.Version) - } - // TODO: if this is an empty log, writes all peer infos - // into the first entry - go s.run() -} - -func (s *EtcdServer) purgeFile() { - var serrc, werrc <-chan error - if s.Cfg.MaxSnapFiles > 0 { - serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) - } - if s.Cfg.MaxWALFiles > 0 { - werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) - } - select { - case e := <-werrc: - plog.Fatalf("failed to purge wal file %v", e) - case e := <-serrc: - plog.Fatalf("failed to purge snap file %v", e) - case <-s.stopping: - return - } -} - -func (s *EtcdServer) ID() types.ID { return s.id } - -func (s *EtcdServer) Cluster() *membership.RaftCluster { return s.cluster } - -func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() } - -func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor } - -func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } - -func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { - if s.cluster.IsIDRemoved(types.ID(m.From)) { - plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) - return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") - } - if m.Type == raftpb.MsgApp { - s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) - } - return s.r.Step(ctx, m) -} - -func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) } - -func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) } - -// ReportSnapshot reports snapshot sent status to the raft state machine, -// and clears the used snapshot from the snapshot store. -func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) { - s.r.ReportSnapshot(id, status) -} - -type etcdProgress struct { - confState raftpb.ConfState - snapi uint64 - appliedt uint64 - appliedi uint64 -} - -// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, -// and helps decouple state machine logic from Raft algorithms. 
-// TODO: add a state machine interface to apply the commit entries and do snapshot/recover -type raftReadyHandler struct { - updateLeadership func(newLeader bool) - updateCommittedIndex func(uint64) -} - -func (s *EtcdServer) run() { - sn, err := s.r.raftStorage.Snapshot() - if err != nil { - plog.Panicf("get snapshot from raft storage error: %v", err) - } - - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() - - var ( - smu sync.RWMutex - syncC <-chan time.Time - ) - setSyncC := func(ch <-chan time.Time) { - smu.Lock() - syncC = ch - smu.Unlock() - } - getSyncC := func() (ch <-chan time.Time) { - smu.RLock() - ch = syncC - smu.RUnlock() - return - } - rh := &raftReadyHandler{ - updateLeadership: func(newLeader bool) { - if !s.isLeader() { - if s.lessor != nil { - s.lessor.Demote() - } - if s.compactor != nil { - s.compactor.Pause() - } - setSyncC(nil) - } else { - if newLeader { - t := time.Now() - s.leadTimeMu.Lock() - s.leadElectedTime = t - s.leadTimeMu.Unlock() - } - setSyncC(s.SyncTicker.C) - if s.compactor != nil { - s.compactor.Resume() - } - } - - // TODO: remove the nil checking - // current test utility does not provide the stats - if s.stats != nil { - s.stats.BecomeLeader() - } - }, - updateCommittedIndex: func(ci uint64) { - cci := s.getCommittedIndex() - if ci > cci { - s.setCommittedIndex(ci) - } - }, - } - s.r.start(rh) - - ep := etcdProgress{ - confState: sn.Metadata.ConfState, - snapi: sn.Metadata.Index, - appliedt: sn.Metadata.Term, - appliedi: sn.Metadata.Index, - } - - defer func() { - s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping - close(s.stopping) - s.wgMu.Unlock() - s.cancel() - - sched.Stop() - - // wait for gouroutines before closing raft so wal stays open - s.wg.Wait() - - s.SyncTicker.Stop() - - // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines - // by adding a peer after raft stops the transport - s.r.stop() - - // kv, lessor and backend can be nil if running without v3 enabled - // or running unit tests. 
- if s.lessor != nil { - s.lessor.Stop() - } - if s.kv != nil { - s.kv.Close() - } - if s.authStore != nil { - s.authStore.Close() - } - if s.be != nil { - s.be.Close() - } - if s.compactor != nil { - s.compactor.Stop() - } - close(s.done) - }() - - var expiredLeaseC <-chan []*lease.Lease - if s.lessor != nil { - expiredLeaseC = s.lessor.ExpiredLeasesC() - } - - for { - select { - case ap := <-s.r.apply(): - f := func(context.Context) { s.applyAll(&ep, &ap) } - sched.Schedule(f) - case leases := <-expiredLeaseC: - s.goAttach(func() { - // Increases throughput of expired leases deletion process through parallelization - c := make(chan struct{}, maxPendingRevokes) - for _, lease := range leases { - select { - case c <- struct{}{}: - case <-s.stopping: - return - } - lid := lease.ID - s.goAttach(func() { - s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) - leaseExpired.Inc() - <-c - }) - } - }) - case err := <-s.errorc: - plog.Errorf("%s", err) - plog.Infof("the data-dir used by this member must be removed.") - return - case <-getSyncC(): - if s.store.HasTTLKeys() { - s.sync(s.Cfg.ReqTimeout()) - } - case <-s.stop: - return - } - } -} - -func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { - s.applySnapshot(ep, apply) - st := time.Now() - s.applyEntries(ep, apply) - d := time.Since(st) - entriesNum := len(apply.entries) - if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration { - plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries)) - plog.Warningf("avoid queries with large range/delete range!") - } - proposalsApplied.Set(float64(ep.appliedi)) - s.applyWait.Trigger(ep.appliedi) - // wait for the raft routine to finish the disk writes before triggering a - // snapshot. or applied index might be greater than the last index in raft - // storage, since the raft routine might be slower than apply routine. - <-apply.notifyc - - s.triggerSnapshot(ep) - select { - // snapshot requested via send() - case m := <-s.r.msgSnapC: - merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState) - s.sendMergedSnap(merged) - default: - } -} - -func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { - if raft.IsEmptySnap(apply.snapshot) { - return - } - - plog.Infof("applying snapshot at index %d...", ep.snapi) - defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) - - if apply.snapshot.Metadata.Index <= ep.appliedi { - plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", - apply.snapshot.Metadata.Index, ep.appliedi) - } - - // wait for raftNode to persist snapshot onto the disk - <-apply.notifyc - - newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) - if err != nil { - plog.Panic(err) - } - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - if s.lessor != nil { - plog.Info("recovering lessor...") - s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) - plog.Info("finished recovering lessor") - } - - plog.Info("restoring mvcc store...") - - if err := s.kv.Restore(newbe); err != nil { - plog.Panicf("restore KV error: %v", err) - } - s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) - - plog.Info("finished restoring mvcc store") - - // Closing old backend might block until all the txns - // on the backend are finished. - // We do not want to wait on closing the old backend. 
- s.bemu.Lock() - oldbe := s.be - go func() { - plog.Info("closing old backend...") - defer plog.Info("finished closing old backend") - - if err := oldbe.Close(); err != nil { - plog.Panicf("close backend error: %v", err) - } - }() - - s.be = newbe - s.bemu.Unlock() - - plog.Info("recovering alarms...") - if err := s.restoreAlarms(); err != nil { - plog.Panicf("restore alarms error: %v", err) - } - plog.Info("finished recovering alarms") - - if s.authStore != nil { - plog.Info("recovering auth store...") - s.authStore.Recover(newbe) - plog.Info("finished recovering auth store") - } - - plog.Info("recovering store v2...") - if err := s.store.Recovery(apply.snapshot.Data); err != nil { - plog.Panicf("recovery store error: %v", err) - } - plog.Info("finished recovering store v2") - - s.cluster.SetBackend(s.be) - plog.Info("recovering cluster configuration...") - s.cluster.Recover(api.UpdateCapability) - plog.Info("finished recovering cluster configuration") - - plog.Info("removing old peers from network...") - // recover raft transport - s.r.transport.RemoveAllPeers() - plog.Info("finished removing old peers from network") - - plog.Info("adding peers from new cluster configuration into network...") - for _, m := range s.cluster.Members() { - if m.ID == s.ID() { - continue - } - s.r.transport.AddPeer(m.ID, m.PeerURLs) - } - plog.Info("finished adding peers from new cluster configuration into network...") - - ep.appliedt = apply.snapshot.Metadata.Term - ep.appliedi = apply.snapshot.Metadata.Index - ep.snapi = ep.appliedi - ep.confState = apply.snapshot.Metadata.ConfState -} - -func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { - if len(apply.entries) == 0 { - return - } - firsti := apply.entries[0].Index - if firsti > ep.appliedi+1 { - plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) - } - var ents []raftpb.Entry - if ep.appliedi+1-firsti < uint64(len(apply.entries)) { - ents = apply.entries[ep.appliedi+1-firsti:] - } - if len(ents) == 0 { - return - } - var shouldstop bool - if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { - go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster")) - } -} - -func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if ep.appliedi-ep.snapi <= s.snapCount { - return - } - - plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) - s.snapshot(ep.appliedi, ep.confState) - ep.snapi = ep.appliedi -} - -func (s *EtcdServer) isMultiNode() bool { - return s.cluster != nil && len(s.cluster.MemberIDs()) > 1 -} - -func (s *EtcdServer) isLeader() bool { - return uint64(s.ID()) == s.Lead() -} - -// transferLeadership transfers the leader to the given transferee. -// TODO: maybe expose to client? 
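> `applySnapshot` encodes two ordering rules worth noting: the lessor is recovered before the MVCC store so restored keys re-attach to the correct leases, and the old backend is closed in a goroutine because `Close` can block until in-flight transactions finish. The swap itself (publish the new handle under the lock, dispose of the old one outside it) generalizes; a sketch under assumed stand-in types:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// fakeBackend stands in for the storage backend; its Close may block
// on in-flight work, which is why the swap closes it off-thread.
type fakeBackend struct{ name string }

func (b *fakeBackend) Close() error {
	time.Sleep(20 * time.Millisecond) // simulate waiting on open txns
	fmt.Println("closed", b.name)
	return nil
}

type server struct {
	mu sync.Mutex
	be *fakeBackend
}

// swapBackend publishes the new backend under the lock and disposes of
// the old one asynchronously, so readers never observe a closed handle
// and the swap never stalls behind a slow Close.
func (s *server) swapBackend(newbe *fakeBackend) {
	s.mu.Lock()
	old := s.be
	s.be = newbe
	s.mu.Unlock()

	if old != nil {
		go func() {
			if err := old.Close(); err != nil {
				fmt.Println("close old backend:", err)
			}
		}()
	}
}

func main() {
	s := &server{be: &fakeBackend{name: "old"}}
	s.swapBackend(&fakeBackend{name: "new"})
	fmt.Println("serving from", s.be.name)
	time.Sleep(50 * time.Millisecond) // let the async close finish
}
```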
-func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error { - now := time.Now() - interval := time.Duration(s.Cfg.TickMs) * time.Millisecond - - plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) - s.r.TransferLeadership(ctx, lead, transferee) - for s.Lead() != transferee { - select { - case <-ctx.Done(): // time out - return ErrTimeoutLeaderTransfer - case <-time.After(interval): - } - } - - // TODO: drain all requests, or drop all messages to the old leader - - plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) - return nil -} - -// TransferLeadership transfers the leader to the chosen transferee. -func (s *EtcdServer) TransferLeadership() error { - if !s.isLeader() { - plog.Printf("skipped leadership transfer for stopping non-leader member") - return nil - } - - if !s.isMultiNode() { - plog.Printf("skipped leadership transfer for single member cluster") - return nil - } - - transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs()) - if !ok { - return ErrUnhealthy - } - - tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(s.ctx, tm) - err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) - cancel() - return err -} - -// HardStop stops the server without coordination with other members in the cluster. -func (s *EtcdServer) HardStop() { - select { - case s.stop <- struct{}{}: - case <-s.done: - return - } - <-s.done -} - -// Stop stops the server gracefully, and shuts down the running goroutine. -// Stop should be called after a Start(s), otherwise it will block forever. -// When stopping leader, Stop transfers its leadership to one of its peers -// before stopping the server. -func (s *EtcdServer) Stop() { - if err := s.TransferLeadership(); err != nil { - plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) - } - s.HardStop() -} - -// ReadyNotify returns a channel that will be closed when the server -// is ready to serve client requests -func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych } - -func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { - select { - case <-time.After(d): - case <-s.done: - } - select { - case s.errorc <- err: - default: - } -} - -// StopNotify returns a channel that receives a empty struct -// when the server is stopped. -func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } - -func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } - -func (s *EtcdServer) LeaderStats() []byte { - lead := atomic.LoadUint64(&s.r.lead) - if lead != uint64(s.id) { - return nil - } - return s.lstats.JSON() -} - -func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } - -func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { - if s.authStore == nil { - // In the context of ordinary etcd process, s.authStore will never be nil. - // This branch is for handling cases in server_test.go - return nil - } - - // Note that this permission check is done in the API layer, - // so TOCTOU problem can be caused potentially in a schedule like this: - // update membership with user A -> revoke root role of A -> apply membership change - // in the state machine layer - // However, both of membership change and role management requires the root privilege. - // So careful operation by admins can prevent the problem. 
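> `transferLeadership` above is a plain poll-until-done loop: request the transfer, then re-check `s.Lead()` once per tick interval until the transferee leads or the context expires. The shape generalizes to any wait-for-replicated-state situation; a condensed sketch with a hypothetical condition function:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("leadership transfer timed out")

// waitFor re-checks cond every interval until it holds or ctx expires,
// the same shape transferLeadership uses to wait for s.Lead() to change.
func waitFor(ctx context.Context, interval time.Duration, cond func() bool) error {
	for !cond() {
		select {
		case <-ctx.Done():
			return errTimeout
		case <-time.After(interval):
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	transferred := time.Now().Add(120 * time.Millisecond)
	err := waitFor(ctx, 20*time.Millisecond, func() bool {
		return time.Now().After(transferred) // stand-in for s.Lead() == transferee
	})
	fmt.Println("transfer complete:", err == nil)
}
```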
- authInfo, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return s.AuthStore().IsAdminPermitted(authInfo) -} - -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - - if s.Cfg.StrictReconfigCheck { - // by default StrictReconfigCheck is enabled; reject new members if unhealthy - if !s.cluster.IsReadyToAddNewMember() { - plog.Warningf("not enough started members, rejecting member add %+v", memb) - return nil, ErrNotEnoughStartedMembers - } - if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { - plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return nil, ErrUnhealthy - } - } - - // TODO: move Member to protobuf type - b, err := json.Marshal(memb) - if err != nil { - return nil, err - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: uint64(memb.ID), - Context: b, - } - return s.configure(ctx, cc) -} - -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - - // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss - if err := s.mayRemoveMember(types.ID(id)); err != nil { - return nil, err - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - return s.configure(ctx, cc) -} - -func (s *EtcdServer) mayRemoveMember(id types.ID) error { - if !s.Cfg.StrictReconfigCheck { - return nil - } - - if !s.cluster.IsReadyToRemoveMember(uint64(id)) { - plog.Warningf("not enough started members, rejecting remove member %s", id) - return ErrNotEnoughStartedMembers - } - - // downed member is safe to remove since it's not part of the active quorum - if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { - return nil - } - - // protect quorum if some members are down - m := s.cluster.Members() - active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) - if (active - 1) < 1+((len(m)-1)/2) { - plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) - return ErrUnhealthy - } - - return nil -} - -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - b, merr := json.Marshal(memb) - if merr != nil { - return nil, merr - } - - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: uint64(memb.ID), - Context: b, - } - return s.configure(ctx, cc) -} - -// Implement the RaftTimer interface - -func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) } - -func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) } - -// Lead is only for testing purposes. -// TODO: add Raft server interface to expose raft related info: -// Index, Term, Lead, Committed, Applied, LastIndex, etc. -func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } - -func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } - -type confChangeResponse struct { - membs []*membership.Member - err error -} - -// configure sends a configuration change through consensus and -// then waits for it to be applied to the server. It -// will block until the change is performed or there is an error. 
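> The guard in `mayRemoveMember` deserves spelling out: removing one connected member must still leave a majority of the current membership active, i.e. `active - 1 >= (len(m)-1)/2 + 1`. A few concrete cluster shapes, computed with the same arithmetic (plain Go, no etcd types):

```go
package main

import "fmt"

// safeToRemove reports whether removing one active member still leaves
// a majority of the current cluster connected (mirrors the deleted
// mayRemoveMember check).
func safeToRemove(clusterSize, active int) bool {
	return (active - 1) >= 1+((clusterSize-1)/2)
}

func main() {
	for _, c := range []struct{ size, active int }{
		{3, 3}, // ok: 2 of 3 remain
		{3, 2}, // rejected: only 1 of 3 would remain
		{5, 4}, // ok: 3 of 5 remain
		{5, 3}, // rejected: only 2 of 5 would remain
	} {
		fmt.Printf("size=%d active=%d -> remove allowed: %v\n",
			c.size, c.active, safeToRemove(c.size, c.active))
	}
}
```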
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { - cc.ID = s.reqIDGen.Next() - ch := s.w.Register(cc.ID) - start := time.Now() - if err := s.r.ProposeConfChange(ctx, cc); err != nil { - s.w.Trigger(cc.ID, nil) - return nil, err - } - select { - case x := <-ch: - if x == nil { - plog.Panicf("configure trigger value should never be nil") - } - resp := x.(*confChangeResponse) - return resp.membs, resp.err - case <-ctx.Done(): - s.w.Trigger(cc.ID, nil) // GC wait - return nil, s.parseProposeCtxErr(ctx.Err(), start) - case <-s.stopping: - return nil, ErrStopped - } -} - -// sync proposes a SYNC request and is non-blocking. -// This makes no guarantee that the request will be proposed or performed. -// The request will be canceled after the given timeout. -func (s *EtcdServer) sync(timeout time.Duration) { - req := pb.Request{ - Method: "SYNC", - ID: s.reqIDGen.Next(), - Time: time.Now().UnixNano(), - } - data := pbutil.MustMarshal(&req) - // There is no promise that node has leader when do SYNC request, - // so it uses goroutine to propose. - ctx, cancel := context.WithTimeout(s.ctx, timeout) - s.goAttach(func() { - s.r.Propose(ctx, data) - cancel() - }) -} - -// publish registers server information into the cluster. The information -// is the JSON representation of this server's member struct, updated with the -// static clientURLs of the server. -// The function keeps attempting to register until it succeeds, -// or its server is stopped. -func (s *EtcdServer) publish(timeout time.Duration) { - b, err := json.Marshal(s.attributes) - if err != nil { - plog.Panicf("json marshal error: %v", err) - return - } - req := pb.Request{ - Method: "PUT", - Path: membership.MemberAttributesStorePath(s.id), - Val: string(b), - } - - for { - ctx, cancel := context.WithTimeout(s.ctx, timeout) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - close(s.readych) - plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) - return - case ErrStopped: - plog.Infof("aborting publish because server is stopped") - return - default: - plog.Errorf("publish error: %v", err) - } - } -} - -func (s *EtcdServer) sendMergedSnap(merged snap.Message) { - atomic.AddInt64(&s.inflightSnapshots, 1) - - s.r.transport.SendSnapshot(merged) - s.goAttach(func() { - select { - case ok := <-merged.CloseNotify(): - // delay releasing inflight snapshot for another 30 seconds to - // block log compaction. - // If the follower still fails to catch up, it is probably just too slow - // to catch up. We cannot avoid the snapshot cycle anyway. - if ok { - select { - case <-time.After(releaseDelayAfterSnapshot): - case <-s.stopping: - } - } - atomic.AddInt64(&s.inflightSnapshots, -1) - case <-s.stopping: - return - } - }) -} - -// apply takes entries received from Raft (after it has been committed) and -// applies them to the current state of the EtcdServer. -// The given entries should not be empty. 
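> `configure`, and `publish` after it, follow etcd's proposal idiom: register interest in a request ID, propose the entry to raft, then block until the apply loop triggers that ID, triggering with nil yourself on timeout so the registration is garbage-collected. A self-contained sketch of such a wait registry (names hypothetical, not etcd's actual `wait` package API):

```go
package main

import (
	"fmt"
	"sync"
)

// waitRegistry matches async apply results to the request IDs awaiting them.
type waitRegistry struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaitRegistry() *waitRegistry {
	return &waitRegistry{m: make(map[uint64]chan interface{})}
}

func (w *waitRegistry) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1) // buffered: Trigger never blocks
	w.m[id] = ch
	return ch
}

// Trigger delivers v to the waiter for id, if any, and drops the entry;
// calling Trigger(id, nil) on timeout is how a caller garbage-collects
// its own registration.
func (w *waitRegistry) Trigger(id uint64, v interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- v
	}
}

func main() {
	w := newWaitRegistry()
	ch := w.Register(42)
	go w.Trigger(42, "applied") // the apply loop plays this role
	fmt.Println(<-ch)
}
```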
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { - for i := range es { - e := es[i] - switch e.Type { - case raftpb.EntryNormal: - s.applyEntryNormal(&e) - case raftpb.EntryConfChange: - // set the consistent index of current executing entry - if e.Index > s.consistIndex.ConsistentIndex() { - s.consistIndex.setConsistentIndex(e.Index) - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - removedSelf, err := s.applyConfChange(cc, confState) - s.setAppliedIndex(e.Index) - shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) - default: - plog.Panicf("entry type should be either EntryNormal or EntryConfChange") - } - atomic.StoreUint64(&s.r.index, e.Index) - atomic.StoreUint64(&s.r.term, e.Term) - appliedt = e.Term - appliedi = e.Index - } - return appliedt, appliedi, shouldStop -} - -// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer -func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { - shouldApplyV3 := false - if e.Index > s.consistIndex.ConsistentIndex() { - // set the consistent index of current executing entry - s.consistIndex.setConsistentIndex(e.Index) - shouldApplyV3 = true - } - defer s.setAppliedIndex(e.Index) - - // raft state machine may generate noop entry when leader confirmation. - // skip it in advance to avoid some potential bug in the future - if len(e.Data) == 0 { - select { - case s.forceVersionC <- struct{}{}: - default: - } - // promote lessor when the local member is leader and finished - // applying all entries from the last term. - if s.isLeader() { - s.lessor.Promote(s.Cfg.electionTimeout()) - } - return - } - - var raftReq pb.InternalRaftRequest - if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible - var r pb.Request - pbutil.MustUnmarshal(&r, e.Data) - s.w.Trigger(r.ID, s.applyV2Request(&r)) - return - } - if raftReq.V2 != nil { - req := raftReq.V2 - s.w.Trigger(req.ID, s.applyV2Request(req)) - return - } - - // do not re-apply applied entries. - if !shouldApplyV3 { - return - } - - id := raftReq.ID - if id == 0 { - id = raftReq.Header.ID - } - - var ar *applyResult - needResult := s.w.IsRegistered(id) - if needResult || !noSideEffect(&raftReq) { - if !needResult && raftReq.Txn != nil { - removeNeedlessRangeReqs(raftReq.Txn) - } - ar = s.applyV3.Apply(&raftReq) - } - - if ar == nil { - return - } - - if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { - s.w.Trigger(id, ar) - return - } - - plog.Errorf("applying raft message exceeded backend quota") - s.goAttach(func() { - a := &pb.AlarmRequest{ - MemberID: uint64(s.ID()), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) - s.w.Trigger(id, ar) - }) -} - -// applyConfChange applies a ConfChange to the server. 
It is only -// invoked with a ConfChange that has already passed through Raft -func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) { - if err := s.cluster.ValidateConfigurationChange(cc); err != nil { - cc.NodeID = raft.None - s.r.ApplyConfChange(cc) - return false, err - } - *confState = *s.r.ApplyConfChange(cc) - switch cc.Type { - case raftpb.ConfChangeAddNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") - } - s.cluster.AddMember(m) - if m.ID != s.id { - s.r.transport.AddPeer(m.ID, m.PeerURLs) - } - case raftpb.ConfChangeRemoveNode: - id := types.ID(cc.NodeID) - s.cluster.RemoveMember(id) - if id == s.id { - return true, nil - } - s.r.transport.RemovePeer(id) - case raftpb.ConfChangeUpdateNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") - } - s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes) - if m.ID != s.id { - s.r.transport.UpdatePeer(m.ID, m.PeerURLs) - } - } - return false, nil -} - -// TODO: non-blocking snapshot -func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { - clone := s.store.Clone() - // commit kv to write metadata (for example: consistent index) to disk. - // KV().commit() updates the consistent index in backend. - // All operations that update consistent index must be called sequentially - // from applyAll function. - // So KV().Commit() cannot run in parallel with apply. It has to be called outside - // the go routine created below. - s.KV().Commit() - - s.goAttach(func() { - d, err := clone.SaveNoCopy() - // TODO: current store will never fail to do a snapshot - // what should we do if the store might fail? - if err != nil { - plog.Panicf("store save should never fail: %v", err) - } - snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) - if err != nil { - // the snapshot was done asynchronously with the progress of raft. - // raft might have already got a newer snapshot. - if err == raft.ErrSnapOutOfDate { - return - } - plog.Panicf("unexpected create snapshot error %v", err) - } - // SaveSnap saves the snapshot and releases the locked wal files - // to the snapshot index. - if err = s.r.storage.SaveSnap(snap); err != nil { - plog.Fatalf("save snapshot error: %v", err) - } - plog.Infof("saved snapshot at index %d", snap.Metadata.Index) - - // When sending a snapshot, etcd will pause compaction. - // After receives a snapshot, the slow follower needs to get all the entries right after - // the snapshot sent to catch up. If we do not pause compaction, the log entries right after - // the snapshot sent might already be compacted. It happens when the snapshot takes long time - // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. - if atomic.LoadInt64(&s.inflightSnapshots) != 0 { - plog.Infof("skip compaction since there is an inflight snapshot") - return - } - - // keep some in memory log entries for slow followers. - compacti := uint64(1) - if snapi > numberOfCatchUpEntries { - compacti = snapi - numberOfCatchUpEntries - } - err = s.r.raftStorage.Compact(compacti) - if err != nil { - // the compaction was done asynchronously with the progress of raft. 
- // raft log might already been compact. - if err == raft.ErrCompacted { - return - } - plog.Panicf("unexpected compaction error %v", err) - } - plog.Infof("compacted raft log at %d", compacti) - }) -} - -// CutPeer drops messages to the specified peer. -func (s *EtcdServer) CutPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.CutPeer(id) - } -} - -// MendPeer recovers the message dropping behavior of the given peer. -func (s *EtcdServer) MendPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.MendPeer(id) - } -} - -func (s *EtcdServer) PauseSending() { s.r.pauseSending() } - -func (s *EtcdServer) ResumeSending() { s.r.resumeSending() } - -func (s *EtcdServer) ClusterVersion() *semver.Version { - if s.cluster == nil { - return nil - } - return s.cluster.Version() -} - -// monitorVersions checks the member's version every monitorVersionInterval. -// It updates the cluster version if all members agrees on a higher one. -// It prints out log if there is a member with a higher version than the -// local version. -func (s *EtcdServer) monitorVersions() { - for { - select { - case <-s.forceVersionC: - case <-time.After(monitorVersionInterval): - case <-s.stopping: - return - } - - if s.Leader() != s.ID() { - continue - } - - v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt)) - if v != nil { - // only keep major.minor version for comparison - v = &semver.Version{ - Major: v.Major, - Minor: v.Minor, - } - } - - // if the current version is nil: - // 1. use the decided version if possible - // 2. or use the min cluster version - if s.cluster.Version() == nil { - verStr := version.MinClusterVersion - if v != nil { - verStr = v.String() - } - s.goAttach(func() { s.updateClusterVersion(verStr) }) - continue - } - - // update cluster version only if the decided version is greater than - // the current cluster version - if v != nil && s.cluster.Version().LessThan(*v) { - s.goAttach(func() { s.updateClusterVersion(v.String()) }) - } - } -} - -func (s *EtcdServer) updateClusterVersion(ver string) { - if s.cluster.Version() == nil { - plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) - } else { - plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) - } - req := pb.Request{ - Method: "PUT", - Path: membership.StoreClusterVersionKey(), - Val: ver, - } - ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - return - case ErrStopped: - plog.Infof("aborting update cluster version because server is stopped") - return - default: - plog.Errorf("error updating cluster version (%v)", err) - } -} - -func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { - switch err { - case context.Canceled: - return ErrCanceled - case context.DeadlineExceeded: - s.leadTimeMu.RLock() - curLeadElected := s.leadElectedTime - s.leadTimeMu.RUnlock() - prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) - if start.After(prevLeadLost) && start.Before(curLeadElected) { - return ErrTimeoutDueToLeaderFail - } - - lead := types.ID(atomic.LoadUint64(&s.r.lead)) - switch lead { - case types.ID(raft.None): - // TODO: return error to specify it happens because the cluster does not have leader now - case s.ID(): - if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { - 
return ErrTimeoutDueToConnectionLost - } - default: - if !isConnectedSince(s.r.transport, start, lead) { - return ErrTimeoutDueToConnectionLost - } - } - - return ErrTimeout - default: - return err - } -} - -func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv } -func (s *EtcdServer) Backend() backend.Backend { - s.bemu.Lock() - defer s.bemu.Unlock() - return s.be -} - -func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } - -func (s *EtcdServer) restoreAlarms() error { - s.applyV3 = s.newApplierV3() - as, err := alarm.NewAlarmStore(s) - if err != nil { - return err - } - s.alarmStore = as - if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { - s.applyV3 = newApplierV3Capped(s.applyV3) - } - return nil -} - -func (s *EtcdServer) getAppliedIndex() uint64 { - return atomic.LoadUint64(&s.appliedIndex) -} - -func (s *EtcdServer) setAppliedIndex(v uint64) { - atomic.StoreUint64(&s.appliedIndex, v) -} - -func (s *EtcdServer) getCommittedIndex() uint64 { - return atomic.LoadUint64(&s.committedIndex) -} - -func (s *EtcdServer) setCommittedIndex(v uint64) { - atomic.StoreUint64(&s.committedIndex, v) -} - -// goAttach creates a goroutine on a given function and tracks it using -// the etcdserver waitgroup. -func (s *EtcdServer) goAttach(f func()) { - s.wgMu.RLock() // this blocks with ongoing close(s.stopping) - defer s.wgMu.RUnlock() - select { - case <-s.stopping: - plog.Warning("server has stopped (skipping goAttach)") - return - default: - } - - // now safe to add since waitgroup wait has not started yet - s.wg.Add(1) - go func() { - defer s.wg.Done() - f() - }() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go deleted file mode 100644 index 928aa95b6..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "io" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" -) - -// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), -// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message -// as ReadCloser. -func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message { - // get a snapshot of v2 store as []byte - clone := s.store.Clone() - d, err := clone.SaveNoCopy() - if err != nil { - plog.Panicf("store save should never fail: %v", err) - } - - // commit kv to write metadata(for example: consistent index). - s.KV().Commit() - dbsnap := s.be.Snapshot() - // get a snapshot of v3 KV as readCloser - rc := newSnapshotReaderCloser(dbsnap) - - // put the []byte snapshot of store into raft snapshot and return the merged snapshot with - // KV readCloser snapshot. 
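> `goAttach`, at the end of the deleted server.go above, is the other half of the shutdown handshake in `run`'s defer: attachers take `wgMu` in read mode, while the stop path takes it in write mode around `close(s.stopping)`, so no `wg.Add` can race with `wg.Wait`. A stripped-down version of that idea (hypothetical names):

```go
package main

import (
	"fmt"
	"sync"
)

type runner struct {
	wgMu     sync.RWMutex
	wg       sync.WaitGroup
	stopping chan struct{}
}

// goAttach starts f as a tracked goroutine, refusing once stop has begun.
func (r *runner) goAttach(f func()) bool {
	r.wgMu.RLock() // blocks while stop holds the write lock
	defer r.wgMu.RUnlock()
	select {
	case <-r.stopping:
		return false // stopping already closed: refuse new work
	default:
	}
	r.wg.Add(1) // safe: Wait cannot have started yet
	go func() {
		defer r.wg.Done()
		f()
	}()
	return true
}

func (r *runner) stop() {
	r.wgMu.Lock() // excludes concurrent goAttach
	close(r.stopping)
	r.wgMu.Unlock()
	r.wg.Wait()
}

func main() {
	r := &runner{stopping: make(chan struct{})}
	r.goAttach(func() { fmt.Println("tracked work") })
	r.stop()
	fmt.Println("attach after stop accepted:", r.goAttach(func() {}))
}
```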
- snapshot := raftpb.Snapshot{ - Metadata: raftpb.SnapshotMetadata{ - Index: snapi, - Term: snapt, - ConfState: confState, - }, - Data: d, - } - m.Snapshot = snapshot - - return *snap.NewMessage(m, rc, dbsnap.Size()) -} - -func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { - pr, pw := io.Pipe() - go func() { - n, err := snapshot.WriteTo(pw) - if err == nil { - plog.Infof("wrote database snapshot out [total bytes: %d]", n) - } else { - plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) - } - pw.CloseWithError(err) - err = snapshot.Close() - if err != nil { - plog.Panicf("failed to close database snapshot: %v", err) - } - }() - return pr -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go deleted file mode 100644 index aa8f87569..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "io" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" -) - -type Storage interface { - // Save function saves ents and state to the underlying stable storage. - // Save MUST block until st and ents are on stable storage. - Save(st raftpb.HardState, ents []raftpb.Entry) error - // SaveSnap function saves snapshot to the underlying stable storage. - SaveSnap(snap raftpb.Snapshot) error - // Close closes the Storage and performs finalization. - Close() error -} - -type storage struct { - *wal.WAL - *snap.Snapshotter -} - -func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage { - return &storage{w, s} -} - -// SaveSnap saves the snapshot to disk and release the locked -// wal files since they will not be used. -func (st *storage) SaveSnap(snap raftpb.Snapshot) error { - walsnap := walpb.Snapshot{ - Index: snap.Metadata.Index, - Term: snap.Metadata.Term, - } - err := st.WAL.SaveSnapshot(walsnap) - if err != nil { - return err - } - err = st.Snapshotter.SaveSnap(snap) - if err != nil { - return err - } - return st.WAL.ReleaseLockTo(snap.Metadata.Index) -} - -func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { - var ( - err error - wmetadata []byte - ) - - repaired := false - for { - if w, err = wal.Open(waldir, snap); err != nil { - plog.Fatalf("open wal error: %v", err) - } - if wmetadata, st, ents, err = w.ReadAll(); err != nil { - w.Close() - // we can only repair ErrUnexpectedEOF and we never repair twice. 
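> `newSnapshotReaderCloser` above turns a push-style producer (`snapshot.WriteTo`) into a pull-style `io.ReadCloser` using `io.Pipe`, forwarding the writer's error to the reader through `CloseWithError`. The same trick works for any streaming source; a minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readerFrom exposes any io.WriterTo as an io.ReadCloser by pumping it
// through a pipe in a background goroutine.
func readerFrom(src io.WriterTo) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		_, err := src.WriteTo(pw)
		pw.CloseWithError(err) // nil err closes cleanly; reader sees io.EOF
	}()
	return pr
}

func main() {
	rc := readerFrom(strings.NewReader("database snapshot bytes"))
	defer rc.Close()
	b, err := io.ReadAll(rc)
	fmt.Printf("%s (err=%v)\n", b, err)
}
```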
- if repaired || err != io.ErrUnexpectedEOF { - plog.Fatalf("read wal error (%v) and cannot be repaired", err) - } - if !wal.Repair(waldir) { - plog.Fatalf("WAL error (%v) cannot be repaired", err) - } else { - plog.Infof("repaired WAL error (%v)", err) - repaired = true - } - continue - } - break - } - var metadata pb.Metadata - pbutil.MustUnmarshal(&metadata, wmetadata) - id = types.ID(metadata.NodeID) - cid = types.ID(metadata.ClusterID) - return -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go deleted file mode 100644 index e3896ffc2..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "time" - - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/rafthttp" -) - -// isConnectedToQuorumSince checks whether the local member is connected to the -// quorum of the cluster since the given time. -func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 -} - -// isConnectedSince checks whether the local member is connected to the -// remote member since the given time. -func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool { - t := transport.ActiveSince(remote) - return !t.IsZero() && t.Before(since) -} - -// isConnectedFullySince checks whether the local member is connected to all -// members in the cluster since the given time. -func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) == len(members) -} - -// numConnectedSince counts how many members are connected to the local member -// since the given time. -func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int { - connectedNum := 0 - for _, m := range members { - if m.ID == self || isConnectedSince(transport, since, m.ID) { - connectedNum++ - } - } - return connectedNum -} - -// longestConnected chooses the member with longest active-since-time. -// It returns false, if nothing is active. 
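> `readWAL` above implements a repair-at-most-once retry: a torn tail (`io.ErrUnexpectedEOF`) triggers one `wal.Repair` attempt and a reopen; any other error, or a second failure, is fatal. The control flow generalized, with hypothetical `open`/`repair` callbacks:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// openWithRepair retries open after repairing a torn tail exactly once.
func openWithRepair(open func() error, repair func() bool) error {
	repaired := false
	for {
		err := open()
		if err == nil {
			return nil
		}
		if repaired || !errors.Is(err, io.ErrUnexpectedEOF) {
			return fmt.Errorf("unrepairable: %w", err)
		}
		if !repair() {
			return fmt.Errorf("repair failed: %w", err)
		}
		repaired = true
	}
}

func main() {
	attempts := 0
	err := openWithRepair(
		func() error { // fails once with a torn tail, then succeeds
			attempts++
			if attempts == 1 {
				return io.ErrUnexpectedEOF
			}
			return nil
		},
		func() bool { return true },
	)
	fmt.Println("opened after", attempts, "attempts, err =", err)
}
```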
-func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) { - var longest types.ID - var oldest time.Time - for _, id := range membs { - tm := tp.ActiveSince(id) - if tm.IsZero() { // inactive - continue - } - - if oldest.IsZero() { // first longest candidate - oldest = tm - longest = id - } - - if tm.Before(oldest) { - oldest = tm - longest = id - } - } - if uint64(longest) == 0 { - return longest, false - } - return longest, true -} - -type notifier struct { - c chan struct{} - err error -} - -func newNotifier() *notifier { - return ¬ifier{ - c: make(chan struct{}), - } -} - -func (nc *notifier) notify(err error) { - nc.err = err - close(nc.c) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go deleted file mode 100644 index 72c4eb7c5..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" -) - -type v2API interface { - Post(ctx context.Context, r *pb.Request) (Response, error) - Put(ctx context.Context, r *pb.Request) (Response, error) - Delete(ctx context.Context, r *pb.Request) (Response, error) - QGet(ctx context.Context, r *pb.Request) (Response, error) - Get(ctx context.Context, r *pb.Request) (Response, error) - Head(ctx context.Context, r *pb.Request) (Response, error) -} - -type v2apiStore struct{ s *EtcdServer } - -func (a *v2apiStore) Post(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) Put(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) Delete(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) QGet(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Response, error) { - data, err := r.Marshal() - if err != nil { - return Response{}, err - } - ch := a.s.w.Register(r.ID) - - start := time.Now() - a.s.r.Propose(ctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - resp := x.(Response) - return resp, resp.err - case <-ctx.Done(): - proposalsFailed.Inc() - a.s.w.Trigger(r.ID, nil) // GC wait - return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) - case <-a.s.stopping: - } - return Response{}, ErrStopped -} - -func (a *v2apiStore) Get(ctx context.Context, r *pb.Request) (Response, error) { - if r.Wait { - wc, err := a.s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since) - if err != nil { - return Response{}, err - } - return Response{Watcher: wc}, nil - } - ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) - if err != nil { - 
return Response{}, err - } - return Response{Event: ev}, nil -} - -func (a *v2apiStore) Head(ctx context.Context, r *pb.Request) (Response, error) { - ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) - if err != nil { - return Response{}, err - } - return Response{Event: ev}, nil -} - -// Do interprets r and performs an operation on s.store according to r.Method -// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with -// Quorum == true, r will be sent through consensus before performing its -// respective operation. Do will block until an action is performed or there is -// an error. -func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { - r.ID = s.reqIDGen.Next() - if r.Method == "GET" && r.Quorum { - r.Method = "QGET" - } - v2api := (v2API)(&v2apiStore{s}) - switch r.Method { - case "POST": - return v2api.Post(ctx, &r) - case "PUT": - return v2api.Put(ctx, &r) - case "DELETE": - return v2api.Delete(ctx, &r) - case "QGET": - return v2api.QGet(ctx, &r) - case "GET": - return v2api.Get(ctx, &r) - case "HEAD": - return v2api.Head(ctx, &r) - } - return Response{}, ErrUnknownMethod -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go deleted file mode 100644 index 870f98e06..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ /dev/null @@ -1,692 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "bytes" - "encoding/binary" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/lease/leasehttp" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/raft" - - "golang.org/x/net/context" -) - -const ( - // the max request size that raft accepts. - // TODO: make this a flag? But we probably do not want to - // accept large request which might block raft stream. User - // specify a large value might end up with shooting in the foot. - maxRequestBytes = 1.5 * 1024 * 1024 - - // In the health case, there might be a small gap (10s of entries) between - // the applied index and committed index. - // However, if the committed entries are very heavy to apply, the gap might grow. - // We should stop accepting new proposals if the gap growing to a certain point. 
- maxGapBetweenApplyAndCommitIndex = 5000 -) - -type RaftKV interface { - Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) - Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) - DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) - Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) -} - -type Lessor interface { - // LeaseGrant sends LeaseGrant request to raft and apply it after committed. - LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed. - LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error - // is returned. - LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) - - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) -} - -type Authenticator interface { - AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) - AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) - Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) - UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if !r.Serializable { - err := s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - } - var resp *pb.RangeResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - get := func() { resp, err = s.applyV3Base.Range(nil, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err -} - -func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) - if err != nil { - return nil, 
err - } - return resp.(*pb.PutResponse), nil -} - -func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) - if err != nil { - return nil, err - } - return resp.(*pb.DeleteRangeResponse), nil -} - -func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnReadonly(r) { - if !isTxnSerializable(r) { - err := s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - } - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) - } - get := func() { resp, err = s.applyV3Base.Txn(r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - return resp.(*pb.TxnResponse), nil -} - -func isTxnSerializable(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - return true -} - -func isTxnReadonly(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil { - return false - } - } - return true -} - -func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) - if r.Physical && result != nil && result.physc != nil { - <-result.physc - // The compaction is done deleting keys; the hash is now settled - // but the data is not necessarily committed. If there's a crash, - // the hash may revert to a hash prior to compaction completing - // if the compaction resumes. Force the finished compaction to - // commit so it won't resume following a crash. - s.be.ForceCommit() - } - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - resp := result.resp.(*pb.CompactionResponse) - if resp == nil { - resp = &pb.CompactionResponse{} - } - if resp.Header == nil { - resp.Header = &pb.ResponseHeader{} - } - resp.Header.Revision = s.kv.Rev() - return resp, nil -} - -func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - // no id given? 
choose one - for r.ID == int64(lease.NoLease) { - // only use positive int64 id's - r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) - } - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) - if err != nil { - return nil, err - } - return resp.(*pb.LeaseGrantResponse), nil -} - -func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) - if err != nil { - return nil, err - } - return resp.(*pb.LeaseRevokeResponse), nil -} - -func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { - ttl, err := s.lessor.Renew(id) - if err == nil { // already requested to primary lessor(leader) - return ttl, nil - } - if err != lease.ErrNotPrimary { - return -1, err - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // renewals don't go through raft; forward to leader manually - for cctx.Err() == nil && err != nil { - leader, lerr := s.waitLeader(cctx) - if lerr != nil { - return -1, lerr - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeasePrefix - ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) - if err == nil || err == lease.ErrLeaseNotFound { - return ttl, err - } - } - } - return -1, ErrTimeout -} - -func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - if s.Leader() == s.ID() { - // primary; timetolive directly from leader - le := s.lessor.Lookup(lease.LeaseID(r.ID)) - if le == nil { - return nil, lease.ErrLeaseNotFound - } - // TODO: fill out ResponseHeader - resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()} - if r.Keys { - ks := le.Keys() - kbs := make([][]byte, len(ks)) - for i := range ks { - kbs[i] = []byte(ks[i]) - } - resp.Keys = kbs - } - return resp, nil - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // forward to leader - for cctx.Err() == nil { - leader, err := s.waitLeader(cctx) - if err != nil { - return nil, err - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeaseInternalPrefix - resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt) - if err == nil { - return resp.LeaseTimeToLiveResponse, nil - } - if err == lease.ErrLeaseNotFound { - return nil, err - } - } - } - return nil, ErrTimeout -} - -func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { - leader := s.cluster.Member(s.Leader()) - for leader == nil { - // wait an election - dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond - select { - case <-time.After(dur): - leader = s.cluster.Member(s.Leader()) - case <-s.stopping: - return nil, ErrStopped - case <-ctx.Done(): - return nil, ErrNoLeader - } - } - if leader == nil || len(leader.PeerURLs) == 0 { - return nil, ErrNoLeader - } - return leader, nil -} - -func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AlarmResponse), nil -} - -func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) - if err != 
nil { - return nil, err - } - return resp.(*pb.AuthEnableResponse), nil -} - -func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthDisableResponse), nil -} - -func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - if err := s.linearizableReadNotify(ctx); err != nil { - return nil, err - } - - var resp proto.Message - for { - checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) - if err != nil { - if err != auth.ErrAuthNotEnabled { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) - } - return nil, err - } - - st, err := s.AuthStore().GenTokenPrefix() - if err != nil { - return nil, err - } - - internalReq := &pb.InternalAuthenticateRequest{ - Name: r.Name, - Password: r.Password, - SimpleToken: st, - } - - resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) - if err != nil { - return nil, err - } - if checkedRevision == s.AuthStore().Revision() { - break - } - plog.Infof("revision when password checked is obsolete, retrying") - } - - return resp.(*pb.AuthenticateResponse), nil -} - -func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserAddResponse), nil -} - -func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserDeleteResponse), nil -} - -func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserChangePasswordResponse), nil -} - -func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGrantRoleResponse), nil -} - -func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGetResponse), nil -} - -func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserListResponse), nil -} - -func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserRevokeRoleResponse), nil -} - -func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := 
s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleAddResponse), nil -} - -func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGrantPermissionResponse), nil -} - -func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGetResponse), nil -} - -func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleListResponse), nil -} - -func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleRevokePermissionResponse), nil -} - -func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleDeleteResponse), nil -} - -func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - result, err := s.processInternalRaftRequestOnce(ctx, r) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp, nil -} - -func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - for { - resp, err := s.raftRequestOnce(ctx, r) - if err != auth.ErrAuthOldRevision { - return resp, err - } - } -} - -// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. -func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { - for { - ai, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - if ai == nil { - // chk expects non-nil AuthInfo; use empty credentials - ai = &auth.AuthInfo{} - } - if err = chk(ai); err != nil { - if err == auth.ErrAuthOldRevision { - continue - } - return err - } - // fetch response for serialized request - get() - // empty credentials or current auth info means no need to retry - if ai.Revision == 0 || ai.Revision == s.authStore.Revision() { - return nil - } - // avoid TOCTOU error, retry of the request is required. 
- } -} - -func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { - ai := s.getAppliedIndex() - ci := s.getCommittedIndex() - if ci > ai+maxGapBetweenApplyAndCommitIndex { - return nil, ErrTooManyRequests - } - - r.Header = &pb.RequestHeader{ - ID: s.reqIDGen.Next(), - } - - authInfo, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return nil, err - } - if authInfo != nil { - r.Header.Username = authInfo.Username - r.Header.AuthRevision = authInfo.Revision - } - - data, err := r.Marshal() - if err != nil { - return nil, err - } - - if len(data) > maxRequestBytes { - return nil, ErrRequestTooLarge - } - - id := r.ID - if id == 0 { - id = r.Header.ID - } - ch := s.w.Register(id) - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - start := time.Now() - s.r.Propose(cctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - return x.(*applyResult), nil - case <-cctx.Done(): - proposalsFailed.Inc() - s.w.Trigger(id, nil) // GC wait - return nil, s.parseProposeCtxErr(cctx.Err(), start) - case <-s.done: - return nil, ErrStopped - } -} - -// Watchable returns a watchable interface attached to the etcdserver. -func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } - -func (s *EtcdServer) linearizableReadLoop() { - var rs raft.ReadState - - for { - ctx := make([]byte, 8) - binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next()) - - select { - case <-s.readwaitc: - case <-s.stopping: - return - } - - nextnr := newNotifier() - - s.readMu.Lock() - nr := s.readNotifier - s.readNotifier = nextnr - s.readMu.Unlock() - - cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - if err := s.r.ReadIndex(cctx, ctx); err != nil { - cancel() - if err == raft.ErrStopped { - return - } - plog.Errorf("failed to get read index from raft: %v", err) - nr.notify(err) - continue - } - cancel() - - var ( - timeout bool - done bool - ) - for !timeout && !done { - select { - case rs = <-s.r.readStateC: - done = bytes.Equal(rs.RequestCtx, ctx) - if !done { - // a previous request might time out. now we should ignore the response of it and - // continue waiting for the response of the current requests. 
- plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx) - } - case <-time.After(s.Cfg.ReqTimeout()): - plog.Warningf("timed out waiting for read index response") - nr.notify(ErrTimeout) - timeout = true - case <-s.stopping: - return - } - } - if !done { - continue - } - - if ai := s.getAppliedIndex(); ai < rs.Index { - select { - case <-s.applyWait.Wait(rs.Index): - case <-s.stopping: - return - } - } - // unblock all l-reads requested at indices before rs.Index - nr.notify(nil) - } -} - -func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { - s.readMu.RLock() - nc := s.readNotifier - s.readMu.RUnlock() - - // signal linearizable loop for current notify if it hasn't been already - select { - case s.readwaitc <- struct{}{}: - default: - } - - // wait for read state notification - select { - case <-nc.c: - return nc.err - case <-ctx.Done(): - return ctx.Err() - case <-s.done: - return ErrStopped - } -} - -func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { - if s.Cfg.ClientCertAuthEnabled { - authInfo := s.AuthStore().AuthInfoFromTLS(ctx) - if authInfo != nil { - return authInfo, nil - } - } - - return s.AuthStore().AuthInfoFromCtx(ctx) -} diff --git a/vendor/github.com/coreos/etcd/main.go b/vendor/github.com/coreos/etcd/main.go deleted file mode 100644 index 0b7357376..000000000 --- a/vendor/github.com/coreos/etcd/main.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package main is a simple wrapper of the real etcd entrypoint package -// (located at github.com/coreos/etcd/etcdmain) to ensure that etcd is still -// "go getable"; e.g. `go get github.com/coreos/etcd` works as expected and -// builds a binary in $GOBIN/etcd -// -// This package should NOT be extended or modified in any way; to modify the -// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package. -// -package main - -import "github.com/coreos/etcd/etcdmain" - -func main() { - etcdmain.Main() -} diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go deleted file mode 100644 index ad5be0308..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mvcc defines etcd's stable MVCC storage. 
-package mvcc diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go deleted file mode 100644 index 991289cdd..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "sort" - "sync" - - "github.com/google/btree" -) - -type index interface { - Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) - Range(key, end []byte, atRev int64) ([][]byte, []revision) - Put(key []byte, rev revision) - Tombstone(key []byte, rev revision) error - RangeSince(key, end []byte, rev int64) []revision - Compact(rev int64) map[revision]struct{} - Equal(b index) bool - - Insert(ki *keyIndex) - KeyIndex(ki *keyIndex) *keyIndex -} - -type treeIndex struct { - sync.RWMutex - tree *btree.BTree -} - -func newTreeIndex() index { - return &treeIndex{ - tree: btree.New(32), - } -} - -func (ti *treeIndex) Put(key []byte, rev revision) { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { - keyi.put(rev.main, rev.sub) - ti.tree.ReplaceOrInsert(keyi) - return - } - okeyi := item.(*keyIndex) - okeyi.put(rev.main, rev.sub) -} - -func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { - keyi := &keyIndex{key: key} - ti.RLock() - defer ti.RUnlock() - if keyi = ti.keyIndex(keyi); keyi == nil { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - return keyi.get(atRev) -} - -func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { - ti.RLock() - defer ti.RUnlock() - return ti.keyIndex(keyi) -} - -func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { - if item := ti.tree.Get(keyi); item != nil { - return item.(*keyIndex) - } - return nil -} - -func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { - if end == nil { - rev, _, _, err := ti.Get(key, atRev) - if err != nil { - return nil, nil - } - return [][]byte{key}, []revision{rev} - } - - keyi := &keyIndex{key: key} - endi := &keyIndex{key: end} - - ti.RLock() - defer ti.RUnlock() - - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - curKeyi := item.(*keyIndex) - rev, _, _, err := curKeyi.get(atRev) - if err != nil { - return true - } - revs = append(revs, rev) - keys = append(keys, curKeyi.key) - return true - }) - - return keys, revs -} - -func (ti *treeIndex) Tombstone(key []byte, rev revision) error { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { - return ErrRevisionNotFound - } - - ki := item.(*keyIndex) - return ki.tombstone(rev.main, rev.sub) -} - -// RangeSince returns all revisions from key(including) to end(excluding) -// at or after the given rev. The returned slice is sorted in the order -// of revision. 
-func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { - ti.RLock() - defer ti.RUnlock() - - keyi := &keyIndex{key: key} - if end == nil { - item := ti.tree.Get(keyi) - if item == nil { - return nil - } - keyi = item.(*keyIndex) - return keyi.since(rev) - } - - endi := &keyIndex{key: end} - var revs []revision - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - curKeyi := item.(*keyIndex) - revs = append(revs, curKeyi.since(rev)...) - return true - }) - sort.Sort(revisions(revs)) - - return revs -} - -func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { - available := make(map[revision]struct{}) - var emptyki []*keyIndex - plog.Printf("store.index: compact %d", rev) - // TODO: do not hold the lock for long time? - // This is probably OK. Compacting 10M keys takes O(10ms). - ti.Lock() - defer ti.Unlock() - ti.tree.Ascend(compactIndex(rev, available, &emptyki)) - for _, ki := range emptyki { - item := ti.tree.Delete(ki) - if item == nil { - plog.Panic("store.index: unexpected delete failure during compaction") - } - } - return available -} - -func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool { - return func(i btree.Item) bool { - keyi := i.(*keyIndex) - keyi.compact(rev, available) - if keyi.isEmpty() { - *emptyki = append(*emptyki, keyi) - } - return true - } -} - -func (a *treeIndex) Equal(bi index) bool { - b := bi.(*treeIndex) - - if a.tree.Len() != b.tree.Len() { - return false - } - - equal := true - - a.tree.Ascend(func(item btree.Item) bool { - aki := item.(*keyIndex) - bki := b.tree.Get(item).(*keyIndex) - if !aki.equal(bki) { - equal = false - return false - } - return true - }) - - return equal -} - -func (ti *treeIndex) Insert(ki *keyIndex) { - ti.Lock() - defer ti.Unlock() - ti.tree.ReplaceOrInsert(ki) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go deleted file mode 100644 index 9104f9b2d..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "errors" - "fmt" - - "github.com/google/btree" -) - -var ( - ErrRevisionNotFound = errors.New("mvcc: revision not found") -) - -// keyIndex stores the revisions of a key in the backend. -// Each keyIndex has at least one key generation. -// Each generation might have several key versions. -// Tombstone on a key appends an tombstone version at the end -// of the current generation and creates a new empty generation. -// Each version of a key has an index pointing to the backend. 
-// -// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo" -// generate a keyIndex: -// key: "foo" -// rev: 5 -// generations: -// {empty} -// {4.0, 5.0(t)} -// {1.0, 2.0, 3.0(t)} -// -// Compact a keyIndex removes the versions with smaller or equal to -// rev except the largest one. If the generation becomes empty -// during compaction, it will be removed. if all the generations get -// removed, the keyIndex should be removed. - -// For example: -// compact(2) on the previous example -// generations: -// {empty} -// {4.0, 5.0(t)} -// {2.0, 3.0(t)} -// -// compact(4) -// generations: -// {empty} -// {4.0, 5.0(t)} -// -// compact(5): -// generations: -// {empty} -> key SHOULD be removed. -// -// compact(6): -// generations: -// {empty} -> key SHOULD be removed. -type keyIndex struct { - key []byte - modified revision // the main rev of the last modification - generations []generation -} - -// put puts a revision to the keyIndex. -func (ki *keyIndex) put(main int64, sub int64) { - rev := revision{main: main, sub: sub} - - if !rev.GreaterThan(ki.modified) { - plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) - } - if len(ki.generations) == 0 { - ki.generations = append(ki.generations, generation{}) - } - g := &ki.generations[len(ki.generations)-1] - if len(g.revs) == 0 { // create a new key - keysGauge.Inc() - g.created = rev - } - g.revs = append(g.revs, rev) - g.ver++ - ki.modified = rev -} - -func (ki *keyIndex) restore(created, modified revision, ver int64) { - if len(ki.generations) != 0 { - plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") - } - - ki.modified = modified - g := generation{created: created, ver: ver, revs: []revision{modified}} - ki.generations = append(ki.generations, g) - keysGauge.Inc() -} - -// tombstone puts a revision, pointing to a tombstone, to the keyIndex. -// It also creates a new empty generation in the keyIndex. -// It returns ErrRevisionNotFound when tombstone on an empty generation. -func (ki *keyIndex) tombstone(main int64, sub int64) error { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) - } - if ki.generations[len(ki.generations)-1].isEmpty() { - return ErrRevisionNotFound - } - ki.put(main, sub) - ki.generations = append(ki.generations, generation{}) - keysGauge.Dec() - return nil -} - -// get gets the modified, created revision and version of the key that satisfies the given atRev. -// Rev must be higher than or equal to the given atRev. -func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) - } - g := ki.findGeneration(atRev) - if g.isEmpty() { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - - n := g.walk(func(rev revision) bool { return rev.main > atRev }) - if n != -1 { - return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil - } - - return revision{}, revision{}, 0, ErrRevisionNotFound -} - -// since returns revisions since the given rev. Only the revision with the -// largest sub revision will be returned if multiple revisions have the same -// main revision. 
-func (ki *keyIndex) since(rev int64) []revision { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) - } - since := revision{rev, 0} - var gi int - // find the generations to start checking - for gi = len(ki.generations) - 1; gi > 0; gi-- { - g := ki.generations[gi] - if g.isEmpty() { - continue - } - if since.GreaterThan(g.created) { - break - } - } - - var revs []revision - var last int64 - for ; gi < len(ki.generations); gi++ { - for _, r := range ki.generations[gi].revs { - if since.GreaterThan(r) { - continue - } - if r.main == last { - // replace the revision with a new one that has higher sub value, - // because the original one should not be seen by external - revs[len(revs)-1] = r - continue - } - revs = append(revs, r) - last = r.main - } - } - return revs -} - -// compact compacts a keyIndex by removing the versions with smaller or equal -// revision than the given atRev except the largest one (If the largest one is -// a tombstone, it will not be kept). -// If a generation becomes empty during compaction, it will be removed. -func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) - } - - // walk until reaching the first revision that has an revision smaller or equal to - // the atRev. - // add it to the available map - f := func(rev revision) bool { - if rev.main <= atRev { - available[rev] = struct{}{} - return false - } - return true - } - - i, g := 0, &ki.generations[0] - // find first generation includes atRev or created after atRev - for i < len(ki.generations)-1 { - if tomb := g.revs[len(g.revs)-1].main; tomb > atRev { - break - } - i++ - g = &ki.generations[i] - } - - if !g.isEmpty() { - n := g.walk(f) - // remove the previous contents. - if n != -1 { - g.revs = g.revs[n:] - } - // remove any tombstone - if len(g.revs) == 1 && i != len(ki.generations)-1 { - delete(available, g.revs[0]) - i++ - } - } - // remove the previous generations. - ki.generations = ki.generations[i:] -} - -func (ki *keyIndex) isEmpty() bool { - return len(ki.generations) == 1 && ki.generations[0].isEmpty() -} - -// findGeneration finds out the generation of the keyIndex that the -// given rev belongs to. If the given rev is at the gap of two generations, -// which means that the key does not exist at the given rev, it returns nil. -func (ki *keyIndex) findGeneration(rev int64) *generation { - lastg := len(ki.generations) - 1 - cg := lastg - - for cg >= 0 { - if len(ki.generations[cg].revs) == 0 { - cg-- - continue - } - g := ki.generations[cg] - if cg != lastg { - if tomb := g.revs[len(g.revs)-1].main; tomb <= rev { - return nil - } - } - if g.revs[0].main <= rev { - return &ki.generations[cg] - } - cg-- - } - return nil -} - -func (a *keyIndex) Less(b btree.Item) bool { - return bytes.Compare(a.key, b.(*keyIndex).key) == -1 -} - -func (a *keyIndex) equal(b *keyIndex) bool { - if !bytes.Equal(a.key, b.key) { - return false - } - if a.modified != b.modified { - return false - } - if len(a.generations) != len(b.generations) { - return false - } - for i := range a.generations { - ag, bg := a.generations[i], b.generations[i] - if !ag.equal(bg) { - return false - } - } - return true -} - -func (ki *keyIndex) String() string { - var s string - for _, g := range ki.generations { - s += g.String() - } - return s -} - -// generation contains multiple revisions of a key. 
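The generation bookkeeping removed above is easiest to see with the example from the `keyIndex` comment: `put(1.0); put(2.0); tombstone(3.0); put(4.0); tombstone(5.0)` leaves two closed generations plus a trailing empty one. A stripped-down sketch (main revisions only; no sub revisions, panics, or metrics) that reproduces those states:

```go
package main

import "fmt"

// rev is a simplified revision: main revision only, tomb marks a tombstone.
type rev struct {
	main int64
	tomb bool
}

type keyIndex struct{ generations [][]rev }

func (ki *keyIndex) put(main int64) {
	if len(ki.generations) == 0 {
		ki.generations = append(ki.generations, nil)
	}
	last := len(ki.generations) - 1
	ki.generations[last] = append(ki.generations[last], rev{main: main})
}

// tombstone ends the current generation and opens a fresh, empty one.
func (ki *keyIndex) tombstone(main int64) {
	last := len(ki.generations) - 1
	ki.generations[last] = append(ki.generations[last], rev{main: main, tomb: true})
	ki.generations = append(ki.generations, nil)
}

func main() {
	ki := &keyIndex{}
	ki.put(1)
	ki.put(2)
	ki.tombstone(3)
	ki.put(4)
	ki.tombstone(5)
	// Prints three generations, oldest first:
	// [{1 false} {2 false} {3 true}] [{4 false} {5 true}] []
	for _, g := range ki.generations {
		fmt.Println(g)
	}
}
```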
-type generation struct { - ver int64 - created revision // when the generation is created (put in first revision). - revs []revision -} - -func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } - -// walk walks through the revisions in the generation in descending order. -// It passes the revision to the given function. -// walk returns until: 1. it finishes walking all pairs 2. the function returns false. -// walk returns the position at where it stopped. If it stopped after -// finishing walking, -1 will be returned. -func (g *generation) walk(f func(rev revision) bool) int { - l := len(g.revs) - for i := range g.revs { - ok := f(g.revs[l-i-1]) - if !ok { - return l - i - 1 - } - } - return -1 -} - -func (g *generation) String() string { - return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) -} - -func (a generation) equal(b generation) bool { - if a.ver != b.ver { - return false - } - if len(a.revs) != len(b.revs) { - return false - } - - for i := range a.revs { - ar, br := a.revs[i], b.revs[i] - if ar != br { - return false - } - } - return true -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go deleted file mode 100644 index 6636347aa..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type RangeOptions struct { - Limit int64 - Rev int64 - Count bool -} - -type RangeResult struct { - KVs []mvccpb.KeyValue - Rev int64 - Count int -} - -type ReadView interface { - // FirstRev returns the first KV revision at the time of opening the txn. - // After a compaction, the first revision increases to the compaction - // revision. - FirstRev() int64 - - // Rev returns the revision of the KV at the time of opening the txn. - Rev() int64 - - // Range gets the keys in the range at rangeRev. - // The returned rev is the current revision of the KV when the operation is executed. - // If rangeRev <=0, range gets the keys at currentRev. - // If `end` is nil, the request returns the key. - // If `end` is not nil and not empty, it gets the keys in range [key, range_end). - // If `end` is not nil and empty, it gets the keys greater than or equal to key. - // Limit limits the number of keys returned. - // If the required rev is compacted, ErrCompacted will be returned. - Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) -} - -// TxnRead represents a read-only transaction with operations that will not -// block other read transactions. -type TxnRead interface { - ReadView - // End marks the transaction is complete and ready to commit. - End() -} - -type WriteView interface { - // DeleteRange deletes the given range from the store. - // A deleteRange increases the rev of the store if any key in the range exists. 
- // The number of key deleted will be returned. - // The returned rev is the current revision of the KV when the operation is executed. - // It also generates one event for each key delete in the event history. - // if the `end` is nil, deleteRange deletes the key. - // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). - DeleteRange(key, end []byte) (n, rev int64) - - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. - // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. - Put(key, value []byte, lease lease.LeaseID) (rev int64) -} - -// TxnWrite represents a transaction that can modify the store. -type TxnWrite interface { - TxnRead - WriteView - // Changes gets the changes made since opening the write txn. - Changes() []mvccpb.KeyValue -} - -// txnReadWrite coerces a read txn to a write, panicking on any write operation. -type txnReadWrite struct{ TxnRead } - -func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } -func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - panic("unexpected Put") -} -func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } - -func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } - -type KV interface { - ReadView - WriteView - - // Read creates a read transaction. - Read() TxnRead - - // Write creates a write transaction. - Write() TxnWrite - - // Hash retrieves the hash of KV state and revision. - // This method is designed for consistency checking purposes. - Hash() (hash uint32, revision int64, err error) - - // Compact frees all superseded keys with revisions less than rev. - Compact(rev int64) (<-chan struct{}, error) - - // Commit commits outstanding txns into the underlying backend. - Commit() - - // Restore restores the KV store from a backend. - Restore(b backend.Backend) error - Close() error -} - -// WatchableKV is a KV that can be watched. -type WatchableKV interface { - KV - Watchable -} - -// Watchable is the interface that wraps the NewWatchStream function. -type Watchable interface { - // NewWatchStream returns a WatchStream that can be used to - // watch events happened or happening on the KV. - NewWatchStream() WatchStream -} - -// ConsistentWatchableKV is a WatchableKV that understands the consistency -// algorithm and consistent index. -// If the consistent index of executing entry is not larger than the -// consistent index of ConsistentWatchableKV, all operations in -// this entry are skipped and return empty response. -type ConsistentWatchableKV interface { - WatchableKV - // ConsistentIndex returns the current consistent index of the KV. - ConsistentIndex() uint64 -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go deleted file mode 100644 index f40ba8edc..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kv_view.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
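`NewReadOnlyTxnWrite` above uses a small but handy Go idiom: embed the narrower interface in a struct and implement the extra methods as panics, so a read-only transaction can travel through code paths typed for `TxnWrite` as long as nothing actually writes. A generic sketch of the same coercion; the `Reader`/`ReadWriter` interfaces here are illustrative, not etcd's:

```go
package main

import "fmt"

type Reader interface{ Get(key string) string }

type ReadWriter interface {
	Reader
	Put(key, value string)
}

type mapReader map[string]string

func (m mapReader) Get(key string) string { return m[key] }

// readOnlyRW satisfies ReadWriter but panics if a write sneaks through,
// mirroring how txnReadWrite coerces a TxnRead into a TxnWrite.
type readOnlyRW struct{ Reader }

func (readOnlyRW) Put(key, value string) { panic("unexpected Put on read-only txn") }

func asReadWriter(r Reader) ReadWriter { return readOnlyRW{r} }

// dump accepts the wider interface but only ever reads.
func dump(rw ReadWriter, keys ...string) {
	for _, k := range keys {
		fmt.Println(k, "=", rw.Get(k))
	}
}

func main() {
	dump(asReadWriter(mapReader{"a": "1", "b": "2"}), "a", "b")
}
```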
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "github.com/coreos/etcd/lease" -) - -type readView struct{ kv KV } - -func (rv *readView) FirstRev() int64 { - tr := rv.kv.Read() - defer tr.End() - return tr.FirstRev() -} - -func (rv *readView) Rev() int64 { - tr := rv.kv.Read() - defer tr.End() - return tr.Rev() -} - -func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - tr := rv.kv.Read() - defer tr.End() - return tr.Range(key, end, ro) -} - -type writeView struct{ kv KV } - -func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { - tw := wv.kv.Write() - defer tw.End() - return tw.DeleteRange(key, end) -} - -func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw := wv.kv.Write() - defer tw.End() - return tw.Put(key, value, lease) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go deleted file mode 100644 index 28a508ccb..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - "errors" - "math" - "sync" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/schedule" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -var ( - keyBucketName = []byte("key") - metaBucketName = []byte("meta") - - consistentIndexKeyName = []byte("consistent_index") - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") - ErrCanceled = errors.New("mvcc: watcher is canceled") - ErrClosed = errors.New("mvcc: closed") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") -) - -const ( - // markedRevBytesLen is the byte length of marked revision. - // The first `revBytesLen` bytes represents a normal revision. The last - // one byte is the mark. - markedRevBytesLen = revBytesLen + 1 - markBytePosition = markedRevBytesLen - 1 - markTombstone byte = 't' -) - -var restoreChunkKeys = 10000 // non-const for testing - -// ConsistentIndexGetter is an interface that wraps the Get method. -// Consistent index is the offset of an entry in a consistent replicated log. 
-type ConsistentIndexGetter interface { - // ConsistentIndex returns the consistent index of current executing entry. - ConsistentIndex() uint64 -} - -type store struct { - ReadView - WriteView - - // mu read locks for txns and write locks for non-txn store changes. - mu sync.RWMutex - - ig ConsistentIndexGetter - - b backend.Backend - kvindex index - - le lease.Lessor - - // revMuLock protects currentRev and compactMainRev. - // Locked at end of write txn and released after write txn unlock lock. - // Locked before locking read txn and released after locking. - revMu sync.RWMutex - // currentRev is the revision of the last completed transaction. - currentRev int64 - // compactMainRev is the main revision of the last compaction. - compactMainRev int64 - - // bytesBuf8 is a byte slice of length 8 - // to avoid a repetitive allocation in saveIndex. - bytesBuf8 []byte - - fifoSched schedule.Scheduler - - stopc chan struct{} -} - -// NewStore returns a new store. It is useful to create a store inside -// mvcc pkg. It should only be used for testing externally. -func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store { - s := &store{ - b: b, - ig: ig, - kvindex: newTreeIndex(), - - le: le, - - currentRev: 1, - compactMainRev: -1, - - bytesBuf8: make([]byte, 8), - fifoSched: schedule.NewFIFOScheduler(), - - stopc: make(chan struct{}), - } - s.ReadView = &readView{s} - s.WriteView = &writeView{s} - if s.le != nil { - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) - } - - tx := s.b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(keyBucketName) - tx.UnsafeCreateBucket(metaBucketName) - tx.Unlock() - s.b.ForceCommit() - - if err := s.restore(); err != nil { - // TODO: return the error instead of panic here? - panic("failed to recover store from backend") - } - - return s -} - -func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { - if ctx == nil || ctx.Err() != nil { - s.mu.Lock() - select { - case <-s.stopc: - default: - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } - s.fifoSched.Schedule(f) - } - s.mu.Unlock() - return - } - close(ch) -} - -func (s *store) Hash() (hash uint32, revision int64, err error) { - s.b.ForceCommit() - h, err := s.b.Hash(DefaultIgnores) - return h, s.currentRev, err -} - -func (s *store) Compact(rev int64) (<-chan struct{}, error) { - s.mu.Lock() - defer s.mu.Unlock() - s.revMu.Lock() - defer s.revMu.Unlock() - - if rev <= s.compactMainRev { - ch := make(chan struct{}) - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } - s.fifoSched.Schedule(f) - return ch, ErrCompacted - } - if rev > s.currentRev { - return nil, ErrFutureRev - } - - start := time.Now() - - s.compactMainRev = rev - - rbytes := newRevBytes() - revToBytes(revision{main: rev}, rbytes) - - tx := s.b.BatchTx() - tx.Lock() - tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes) - tx.Unlock() - // ensure that desired compaction is persisted - s.b.ForceCommit() - - keep := s.kvindex.Compact(rev) - ch := make(chan struct{}) - var j = func(ctx context.Context) { - if ctx.Err() != nil { - s.compactBarrier(ctx, ch) - return - } - if !s.scheduleCompaction(rev, keep) { - s.compactBarrier(nil, ch) - return - } - close(ch) - } - - s.fifoSched.Schedule(j) - - indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) - return ch, nil -} - -// DefaultIgnores is a map of keys to ignore in hash checking. 
-var DefaultIgnores map[backend.IgnoreKey]struct{} - -func init() { - DefaultIgnores = map[backend.IgnoreKey]struct{}{ - // consistent index might be changed due to v2 internal sync, which - // is not controllable by the user. - {Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {}, - } -} - -func (s *store) Commit() { - s.mu.Lock() - defer s.mu.Unlock() - - tx := s.b.BatchTx() - tx.Lock() - s.saveIndex(tx) - tx.Unlock() - s.b.ForceCommit() -} - -func (s *store) Restore(b backend.Backend) error { - s.mu.Lock() - defer s.mu.Unlock() - - close(s.stopc) - s.fifoSched.Stop() - - s.b = b - s.kvindex = newTreeIndex() - s.currentRev = 1 - s.compactMainRev = -1 - s.fifoSched = schedule.NewFIFOScheduler() - s.stopc = make(chan struct{}) - - return s.restore() -} - -func (s *store) restore() error { - reportDbTotalSizeInBytesMu.Lock() - b := s.b - reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } - reportDbTotalSizeInBytesMu.Unlock() - - min, max := newRevBytes(), newRevBytes() - revToBytes(revision{main: 1}, min) - revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) - - keyToLease := make(map[string]lease.LeaseID) - - // restore index - tx := s.b.BatchTx() - tx.Lock() - - _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) - if len(finishedCompactBytes) != 0 { - s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main - plog.Printf("restore compact to %d", s.compactMainRev) - } - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - } - - // index keys concurrently as they're loaded in from tx - keysGauge.Set(0) - rkvc, revc := restoreIntoIndex(s.kvindex) - for { - keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) - if len(keys) == 0 { - break - } - // rkvc blocks if the total pending keys exceeds the restore - // chunk size to keep keys from consuming too much memory. - restoreChunk(rkvc, keys, vals, keyToLease) - if len(keys) < restoreChunkKeys { - // partial set implies final set - break - } - // next set begins after where this one ended - newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) - newMin.sub++ - revToBytes(newMin, min) - } - close(rkvc) - s.currentRev = <-revc - - // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. - // the correct revision should be set to compaction revision in the case, not the largest revision - // we have seen. - if s.currentRev < s.compactMainRev { - s.currentRev = s.compactMainRev - } - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - - for key, lid := range keyToLease { - if s.le == nil { - panic("no lessor to attach lease") - } - err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) - if err != nil { - plog.Errorf("unexpected Attach error: %v", err) - } - } - - tx.Unlock() - - if scheduledCompact != 0 { - s.Compact(scheduledCompact) - plog.Printf("resume scheduled compaction at %d", scheduledCompact) - } - - return nil -} - -type revKeyValue struct { - key []byte - kv mvccpb.KeyValue - kstr string -} - -func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { - rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) - go func() { - currentRev := int64(1) - defer func() { revc <- currentRev }() - // restore the tree index from streaming the unordered index. 
- kiCache := make(map[string]*keyIndex, restoreChunkKeys) - for rkv := range rkvc { - ki, ok := kiCache[rkv.kstr] - // purge kiCache if many keys but still missing in the cache - if !ok && len(kiCache) >= restoreChunkKeys { - i := 10 - for k := range kiCache { - delete(kiCache, k) - if i--; i == 0 { - break - } - } - } - // cache miss, fetch from tree index if there - if !ok { - ki = &keyIndex{key: rkv.kv.Key} - if idxKey := idx.KeyIndex(ki); idxKey != nil { - kiCache[rkv.kstr], ki = idxKey, idxKey - ok = true - } - } - rev := bytesToRev(rkv.key) - currentRev = rev.main - if ok { - if isTombstone(rkv.key) { - ki.tombstone(rev.main, rev.sub) - continue - } - ki.put(rev.main, rev.sub) - } else if !isTombstone(rkv.key) { - ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) - idx.Insert(ki) - kiCache[rkv.kstr] = ki - } - } - }() - return rkvc, revc -} - -func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { - for i, key := range keys { - rkv := revKeyValue{key: key} - if err := rkv.kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - rkv.kstr = string(rkv.kv.Key) - if isTombstone(key) { - delete(keyToLease, rkv.kstr) - } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { - keyToLease[rkv.kstr] = lid - } else { - delete(keyToLease, rkv.kstr) - } - kvc <- rkv - } -} - -func (s *store) Close() error { - close(s.stopc) - s.fifoSched.Stop() - return nil -} - -func (a *store) Equal(b *store) bool { - if a.currentRev != b.currentRev { - return false - } - if a.compactMainRev != b.compactMainRev { - return false - } - return a.kvindex.Equal(b.kvindex) -} - -func (s *store) saveIndex(tx backend.BatchTx) { - if s.ig == nil { - return - } - bs := s.bytesBuf8 - binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) - // put the index into the underlying backend - // tx has been locked in TxnBegin, so there is no need to lock it again - tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) -} - -func (s *store) ConsistentIndex() uint64 { - // TODO: cache index in a uint64 field? - tx := s.b.BatchTx() - tx.Lock() - defer tx.Unlock() - _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) - if len(vs) == 0 { - return 0 - } - return binary.BigEndian.Uint64(vs[0]) -} - -// appendMarkTombstone appends tombstone mark to normal revision bytes. -func appendMarkTombstone(b []byte) []byte { - if len(b) != revBytesLen { - plog.Panicf("cannot append mark to non normal revision bytes") - } - return append(b, markTombstone) -} - -// isTombstone checks whether the revision bytes is a tombstone. -func isTombstone(b []byte) bool { - return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone -} - -// revBytesRange returns the range of revision bytes at -// the given revision. -func revBytesRange(rev revision) (start, end []byte) { - start = newRevBytes() - revToBytes(rev, start) - - end = newRevBytes() - endRev := revision{main: rev.main, sub: rev.sub + 1} - revToBytes(endRev, end) - - return start, end -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go deleted file mode 100644 index bbd38f547..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
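`restore` above streams keys into the index through a channel whose buffer equals the restore chunk size, so the loader blocks instead of accumulating unbounded pending keys in memory while the indexer catches up. A reduced sketch of that producer/consumer shape; the `int` payload stands in for the real `revKeyValue`:

```go
package main

import "fmt"

const chunkSize = 4 // stand-in for restoreChunkKeys (10000 in etcd)

// startIndexer mimics restoreIntoIndex: it returns a send channel for
// entries and a channel that yields the final revision once the sender
// closes the stream.
func startIndexer() (chan<- int, <-chan int64) {
	kvc, revc := make(chan int, chunkSize), make(chan int64, 1)
	go func() {
		current := int64(0)
		defer func() { revc <- current }()
		for rev := range kvc { // producer blocks once chunkSize entries are pending
			current = int64(rev) // "index" the entry; keep the largest revision seen
		}
	}()
	return kvc, revc
}

func main() {
	kvc, revc := startIndexer()
	for rev := 1; rev <= 10; rev++ { // producer: feed chunks of revisions
		kvc <- rev
	}
	close(kvc)
	fmt.Println("currentRev after restore:", <-revc)
}
```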
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - "time" -) - -func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { - totalStart := time.Now() - defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) - - end := make([]byte, 8) - binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) - - batchsize := int64(10000) - last := make([]byte, 8+1+8) - for { - var rev revision - - start := time.Now() - tx := s.b.BatchTx() - tx.Lock() - - keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize) - for _, key := range keys { - rev = bytesToRev(key) - if _, ok := keep[rev]; !ok { - tx.UnsafeDelete(keyBucketName, key) - } - } - - if len(keys) < int(batchsize) { - rbytes := make([]byte, 8+1+8) - revToBytes(revision{main: compactMainRev}, rbytes) - tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes) - tx.Unlock() - plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) - return true - } - - // update last - revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) - tx.Unlock() - dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) - - select { - case <-time.After(100 * time.Millisecond): - case <-s.stopc: - return false - } - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go deleted file mode 100644 index 13d4d530d..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
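`scheduleCompaction` above bounds pause times by deleting at most one fixed-size batch (10000 keys) per transaction, recording `finishedCompactRev` on the final batch, and sleeping 100ms between batches while staying responsive to shutdown. A compact sketch of that loop shape, with sizes shrunk and `fmt.Println` standing in for the backend delete:

```go
package main

import (
	"fmt"
	"time"
)

// compactBatched deletes keys not listed in keep, in fixed-size batches,
// pausing between batches so other work can take the lock. It returns
// false if asked to stop, matching scheduleCompaction's contract.
func compactBatched(keys []int, keep map[int]bool, stopc <-chan struct{}) bool {
	const batchSize = 3
	pause := 10 * time.Millisecond // etcd pauses 100ms between batches

	for start := 0; start < len(keys); start += batchSize {
		end := start + batchSize
		if end > len(keys) {
			end = len(keys)
		}
		for _, k := range keys[start:end] {
			if !keep[k] {
				fmt.Println("delete", k)
			}
		}
		if end == len(keys) {
			return true // finished: etcd writes finishedCompactRev here
		}
		select { // yield between batches; abort promptly on shutdown
		case <-time.After(pause):
		case <-stopc:
			return false
		}
	}
	return true
}

func main() {
	keys := []int{1, 2, 3, 4, 5, 6, 7}
	compactBatched(keys, map[int]bool{4: true, 7: true}, make(chan struct{}))
}
```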
- -package mvcc - -import ( - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type storeTxnRead struct { - s *store - tx backend.ReadTx - - firstRev int64 - rev int64 -} - -func (s *store) Read() TxnRead { - s.mu.RLock() - tx := s.b.ReadTx() - s.revMu.RLock() - tx.Lock() - firstRev, rev := s.compactMainRev, s.currentRev - s.revMu.RUnlock() - return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) -} - -func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } -func (tr *storeTxnRead) Rev() int64 { return tr.rev } - -func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - return tr.rangeKeys(key, end, tr.Rev(), ro) -} - -func (tr *storeTxnRead) End() { - tr.tx.Unlock() - tr.s.mu.RUnlock() -} - -type storeTxnWrite struct { - *storeTxnRead - tx backend.BatchTx - // beginRev is the revision where the txn begins; it will write to the next revision. - beginRev int64 - changes []mvccpb.KeyValue -} - -func (s *store) Write() TxnWrite { - s.mu.RLock() - tx := s.b.BatchTx() - tx.Lock() - tw := &storeTxnWrite{ - storeTxnRead: &storeTxnRead{s, tx, 0, 0}, - tx: tx, - beginRev: s.currentRev, - changes: make([]mvccpb.KeyValue, 0, 4), - } - return newMetricsTxnWrite(tw) -} - -func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } - -func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - rev := tw.beginRev - if len(tw.changes) > 0 { - rev++ - } - return tw.rangeKeys(key, end, rev, ro) -} - -func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { - if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { - return n, int64(tw.beginRev + 1) - } - return 0, int64(tw.beginRev) -} - -func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { - tw.put(key, value, lease) - return int64(tw.beginRev + 1) -} - -func (tw *storeTxnWrite) End() { - // only update index if the txn modifies the mvcc state. - if len(tw.changes) != 0 { - tw.s.saveIndex(tw.tx) - // hold revMu lock to prevent new read txns from opening until writeback. 
- tw.s.revMu.Lock() - tw.s.currentRev++ - } - tw.tx.Unlock() - if len(tw.changes) != 0 { - tw.s.revMu.Unlock() - } - tw.s.mu.RUnlock() -} - -func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { - rev := ro.Rev - if rev > curRev { - return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev - } - if rev <= 0 { - rev = curRev - } - if rev < tr.s.compactMainRev { - return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted - } - - _, revpairs := tr.s.kvindex.Range(key, end, int64(rev)) - if len(revpairs) == 0 { - return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil - } - if ro.Count { - return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil - } - - var kvs []mvccpb.KeyValue - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) - if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) - } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - kvs = append(kvs, kv) - if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { - break - } - } - return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil -} - -func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { - rev := tw.beginRev + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := tw.s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) - } - - ibytes := newRevBytes() - idxRev := revision{main: rev, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - tw.s.kvindex.Put(key, idxRev) - tw.changes = append(tw.changes, kv) - - if oldLease != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to detach lease") - } - err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) - } - } - if leaseID != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to attach lease") - } - err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } -} - -func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { - rrev := tw.beginRev - if len(tw.changes) > 0 { - rrev += 1 - } - keys, revs := tw.s.kvindex.Range(key, end, rrev) - if len(keys) == 0 { - return 0 - } - for i, key := range keys { - tw.delete(key, revs[i]) - } - return int64(len(keys)) -} - -func (tw *storeTxnWrite) delete(key []byte, rev revision) { - ibytes := newRevBytes() - idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - ibytes = appendMarkTombstone(ibytes) - - kv := mvccpb.KeyValue{Key: key} - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - err = tw.s.kvindex.Tombstone(key, idxRev) - if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) - } - tw.changes = 
append(tw.changes, kv) - - item := lease.LeaseItem{Key: string(key)} - leaseID := tw.s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - plog.Errorf("cannot detach %v", err) - } - } -} - -func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go deleted file mode 100644 index a65fe59b9..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - rangeCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "range_total", - Help: "Total number of ranges seen by this member.", - }) - - putCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "put_total", - Help: "Total number of puts seen by this member.", - }) - - deleteCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "delete_total", - Help: "Total number of deletes seen by this member.", - }) - - txnCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "txn_total", - Help: "Total number of txns seen by this member.", - }) - - keysGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "keys_total", - Help: "Total number of keys.", - }) - - watchStreamGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watch_stream_total", - Help: "Total number of watch streams.", - }) - - watcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watcher_total", - Help: "Total number of watchers.", - }) - - slowWatcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "slow_watcher_total", - Help: "Total number of unsynced slow watchers.", - }) - - totalEventsCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "events_total", - Help: "Total number of events sent by this member.", - }) - - pendingEventsGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "pending_events_total", - Help: "Total number of pending events to be sent.", - }) - - indexCompactionPauseDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "index_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of index compaction pause duration.", - // 0.5ms -> 
1second - Buckets: prometheus.ExponentialBuckets(0.5, 2, 12), - }) - - dbCompactionPauseDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of db compaction pause duration.", - // 1ms -> 4second - Buckets: prometheus.ExponentialBuckets(1, 2, 13), - }) - - dbCompactionTotalDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_total_duration_milliseconds", - Help: "Bucketed histogram of db compaction total duration.", - // 100ms -> 800second - Buckets: prometheus.ExponentialBuckets(100, 2, 14), - }) - - dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_total_size_in_bytes", - Help: "Total size of the underlying database in bytes.", - }, - func() float64 { - reportDbTotalSizeInBytesMu.RLock() - defer reportDbTotalSizeInBytesMu.RUnlock() - return reportDbTotalSizeInBytes() - }, - ) - // overridden by mvcc initialization - reportDbTotalSizeInBytesMu sync.RWMutex - reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } -) - -func init() { - prometheus.MustRegister(rangeCounter) - prometheus.MustRegister(putCounter) - prometheus.MustRegister(deleteCounter) - prometheus.MustRegister(txnCounter) - prometheus.MustRegister(keysGauge) - prometheus.MustRegister(watchStreamGauge) - prometheus.MustRegister(watcherGauge) - prometheus.MustRegister(slowWatcherGauge) - prometheus.MustRegister(totalEventsCounter) - prometheus.MustRegister(pendingEventsGauge) - prometheus.MustRegister(indexCompactionPauseDurations) - prometheus.MustRegister(dbCompactionPauseDurations) - prometheus.MustRegister(dbCompactionTotalDurations) - prometheus.MustRegister(dbTotalSize) -} - -// ReportEventReceived reports that an event is received. -// This function should be called when the external systems received an -// event from mvcc.Watcher. -func ReportEventReceived(n int) { - pendingEventsGauge.Sub(float64(n)) - totalEventsCounter.Add(float64(n)) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go deleted file mode 100644 index fd2144279..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
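The bucket-range comments in the deleted metrics follow directly from the `ExponentialBuckets(start, factor, count)` arithmetic: the last bucket is `start * factor^(count-1)`, so 0.5ms doubled 11 times is 1024ms (~1s), 1ms doubled 12 times is 4096ms (~4s), and 100ms doubled 13 times is 819200ms (~800s). A dependency-free check; the helper below mirrors the arithmetic of `prometheus.ExponentialBuckets` rather than calling it:

```go
package main

import "fmt"

// exponentialBuckets mirrors the arithmetic of prometheus.ExponentialBuckets:
// count buckets starting at start, each factor times the previous one.
func exponentialBuckets(start, factor float64, count int) []float64 {
	buckets := make([]float64, count)
	for i := range buckets {
		buckets[i] = start
		start *= factor
	}
	return buckets
}

func main() {
	histograms := []struct {
		name  string
		start float64
		count int
	}{
		{"index_compaction_pause (0.5ms -> ~1s)", 0.5, 12},
		{"db_compaction_pause (1ms -> ~4s)", 1, 13},
		{"db_compaction_total (100ms -> ~800s)", 100, 14},
	}
	for _, h := range histograms {
		b := exponentialBuckets(h.start, 2, h.count)
		fmt.Printf("%-40s first=%gms last=%gms\n", h.name, b[0], b[len(b)-1])
	}
}
```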
- -package mvcc - -import ( - "github.com/coreos/etcd/lease" -) - -type metricsTxnWrite struct { - TxnWrite - ranges uint - puts uint - deletes uint -} - -func newMetricsTxnRead(tr TxnRead) TxnRead { - return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} -} - -func newMetricsTxnWrite(tw TxnWrite) TxnWrite { - return &metricsTxnWrite{tw, 0, 0, 0} -} - -func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { - tw.ranges++ - return tw.TxnWrite.Range(key, end, ro) -} - -func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { - tw.deletes++ - return tw.TxnWrite.DeleteRange(key, end) -} - -func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw.puts++ - return tw.TxnWrite.Put(key, value, lease) -} - -func (tw *metricsTxnWrite) End() { - defer tw.TxnWrite.End() - if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { - if sum > 1 { - txnCounter.Inc() - } - return - } - switch { - case tw.ranges == 1: - rangeCounter.Inc() - case tw.puts == 1: - putCounter.Inc() - case tw.deletes == 1: - deleteCounter.Inc() - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go deleted file mode 100644 index 5fa35a1c2..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/revision.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import "encoding/binary" - -// revBytesLen is the byte length of a normal revision. -// First 8 bytes is the revision.main in big-endian format. The 9th byte -// is a '_'. The last 8 bytes is the revision.sub in big-endian format. -const revBytesLen = 8 + 1 + 8 - -// A revision indicates modification of the key-value space. -// The set of changes that share same main revision changes the key-value space atomically. -type revision struct { - // main is the main revision of a set of changes that happen atomically. - main int64 - - // sub is the sub revision of a change in a set of changes that happen - // atomically. Each change has different increasing sub revision in that - // set.
- sub int64 -} - -func (a revision) GreaterThan(b revision) bool { - if a.main > b.main { - return true - } - if a.main < b.main { - return false - } - return a.sub > b.sub -} - -func newRevBytes() []byte { - return make([]byte, revBytesLen, markedRevBytesLen) -} - -func revToBytes(rev revision, bytes []byte) { - binary.BigEndian.PutUint64(bytes, uint64(rev.main)) - bytes[8] = '_' - binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub)) -} - -func bytesToRev(bytes []byte) revision { - return revision{ - main: int64(binary.BigEndian.Uint64(bytes[0:8])), - sub: int64(binary.BigEndian.Uint64(bytes[9:])), - } -} - -type revisions []revision - -func (a revisions) Len() int { return len(a) } -func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) } -func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go deleted file mode 100644 index 8a0df0bfc..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/util.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -func UpdateConsistentIndex(be backend.Backend, index uint64) { - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var oldi uint64 - _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) - if len(vs) != 0 { - oldi = binary.BigEndian.Uint64(vs[0]) - } - - if index <= oldi { - return - } - - bs := make([]byte, 8) - binary.BigEndian.PutUint64(bs, index) - tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) -} - -func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { - ibytes := newRevBytes() - revToBytes(revision{main: kv.ModRevision}, ibytes) - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - be.BatchTx().Lock() - be.BatchTx().UnsafePut(keyBucketName, ibytes, d) - be.BatchTx().Unlock() -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go deleted file mode 100644 index 68d9ab71d..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
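The revision encoding deleted above is what lets the backend range-scan revisions with plain byte comparisons: big-endian `main`, a `'_'` separator at byte 8, then big-endian `sub`, so byte order matches revision order for non-negative revisions. A self-contained sketch of that 17-byte layout:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const revBytesLen = 8 + 1 + 8 // main, '_' separator, sub

func revToBytes(main, sub int64) []byte {
	b := make([]byte, revBytesLen)
	binary.BigEndian.PutUint64(b, uint64(main))
	b[8] = '_'
	binary.BigEndian.PutUint64(b[9:], uint64(sub))
	return b
}

func main() {
	// Big-endian encoding makes byte order equal revision order, so
	// (5,0) < (5,1) < (6,0) holds under bytes.Compare as well.
	a := revToBytes(5, 0)
	b := revToBytes(5, 1)
	c := revToBytes(6, 0)
	fmt.Println(bytes.Compare(a, b) < 0, bytes.Compare(b, c) < 0) // true true
}
```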
- -package mvcc - -import ( - "sync" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -const ( - // chanBufLen is the length of the buffered chan - // for sending out watched events. - // TODO: find a good buf value. 1024 is just a random one that - // seems to be reasonable. - chanBufLen = 1024 - - // maxWatchersPerSync is the number of watchers to sync in a single batch - maxWatchersPerSync = 512 -) - -type watchable interface { - watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) - progress(w *watcher) - rev() int64 -} - -type watchableStore struct { - *store - - // mu protects watcher groups and batches. It should never be locked - // before locking store.mu to avoid deadlock. - mu sync.RWMutex - - // victims are watcher batches that were blocked on the watch channel - victims []watcherBatch - victimc chan struct{} - - // contains all unsynced watchers that needs to sync with events that have happened - unsynced watcherGroup - - // contains all synced watchers that are in sync with the progress of the store. - // The key of the map is the key that the watcher watches on. - synced watcherGroup - - stopc chan struct{} - wg sync.WaitGroup -} - -// cancelFunc updates unsynced and synced maps when running -// cancel operations. -type cancelFunc func() - -func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV { - return newWatchableStore(b, le, ig) -} - -func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore { - s := &watchableStore{ - store: NewStore(b, le, ig), - victimc: make(chan struct{}, 1), - unsynced: newWatcherGroup(), - synced: newWatcherGroup(), - stopc: make(chan struct{}), - } - s.store.ReadView = &readView{s} - s.store.WriteView = &writeView{s} - if s.le != nil { - // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) - } - s.wg.Add(2) - go s.syncWatchersLoop() - go s.syncVictimsLoop() - return s -} - -func (s *watchableStore) Close() error { - close(s.stopc) - s.wg.Wait() - return s.store.Close() -} - -func (s *watchableStore) NewWatchStream() WatchStream { - watchStreamGauge.Inc() - return &watchStream{ - watchable: s, - ch: make(chan WatchResponse, chanBufLen), - cancels: make(map[WatchID]cancelFunc), - watchers: make(map[WatchID]*watcher), - } -} - -func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { - wa := &watcher{ - key: key, - end: end, - minRev: startRev, - id: id, - ch: ch, - fcs: fcs, - } - - s.mu.Lock() - s.revMu.RLock() - synced := startRev > s.store.currentRev || startRev == 0 - if synced { - wa.minRev = s.store.currentRev + 1 - if startRev > wa.minRev { - wa.minRev = startRev - } - } - if synced { - s.synced.add(wa) - } else { - slowWatcherGauge.Inc() - s.unsynced.add(wa) - } - s.revMu.RUnlock() - s.mu.Unlock() - - watcherGauge.Inc() - - return wa, func() { s.cancelWatcher(wa) } -} - -// cancelWatcher removes references of the watcher from the watchableStore -func (s *watchableStore) cancelWatcher(wa *watcher) { - for { - s.mu.Lock() - - if s.unsynced.delete(wa) { - slowWatcherGauge.Dec() - break - } else if s.synced.delete(wa) { - break - } else if wa.compacted { - break - } - - if !wa.victim { - panic("watcher not victim but not in watch groups") - } - - var 
victimBatch watcherBatch - for _, wb := range s.victims { - if wb[wa] != nil { - victimBatch = wb - break - } - } - if victimBatch != nil { - slowWatcherGauge.Dec() - delete(victimBatch, wa) - break - } - - // victim being processed so not accessible; retry - s.mu.Unlock() - time.Sleep(time.Millisecond) - } - - watcherGauge.Dec() - s.mu.Unlock() -} - -func (s *watchableStore) Restore(b backend.Backend) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.store.Restore(b) - if err != nil { - return err - } - - for wa := range s.synced.watchers { - s.unsynced.watchers.add(wa) - } - s.synced = newWatcherGroup() - return nil -} - -// syncWatchersLoop syncs the watcher in the unsynced map every 100ms. -func (s *watchableStore) syncWatchersLoop() { - defer s.wg.Done() - - for { - s.mu.RLock() - st := time.Now() - lastUnsyncedWatchers := s.unsynced.size() - s.mu.RUnlock() - - unsyncedWatchers := 0 - if lastUnsyncedWatchers > 0 { - unsyncedWatchers = s.syncWatchers() - } - syncDuration := time.Since(st) - - waitDuration := 100 * time.Millisecond - // more work pending? - if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { - // be fair to other store operations by yielding time taken - waitDuration = syncDuration - } - - select { - case <-time.After(waitDuration): - case <-s.stopc: - return - } - } -} - -// syncVictimsLoop tries to write precomputed watcher responses to -// watchers that had a blocked watcher channel -func (s *watchableStore) syncVictimsLoop() { - defer s.wg.Done() - - for { - for s.moveVictims() != 0 { - // try to update all victim watchers - } - s.mu.RLock() - isEmpty := len(s.victims) == 0 - s.mu.RUnlock() - - var tickc <-chan time.Time - if !isEmpty { - tickc = time.After(10 * time.Millisecond) - } - - select { - case <-tickc: - case <-s.victimc: - case <-s.stopc: - return - } - } -} - -// moveVictims tries to update watches with already pending event data -func (s *watchableStore) moveVictims() (moved int) { - s.mu.Lock() - victims := s.victims - s.victims = nil - s.mu.Unlock() - - var newVictim watcherBatch - for _, wb := range victims { - // try to send responses again - for w, eb := range wb { - // watcher has observed the store up to, but not including, w.minRev - rev := w.minRev - 1 - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - if newVictim == nil { - newVictim = make(watcherBatch) - } - newVictim[w] = eb - continue - } - moved++ - } - - // assign completed victim watchers to unsync/sync - s.mu.Lock() - s.store.revMu.RLock() - curRev := s.store.currentRev - for w, eb := range wb { - if newVictim != nil && newVictim[w] != nil { - // couldn't send watch response; stays victim - continue - } - w.victim = false - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - if w.minRev <= curRev { - s.unsynced.add(w) - } else { - slowWatcherGauge.Dec() - s.synced.add(w) - } - } - s.store.revMu.RUnlock() - s.mu.Unlock() - } - - if len(newVictim) > 0 { - s.mu.Lock() - s.victims = append(s.victims, newVictim) - s.mu.Unlock() - } - - return moved -} - -// syncWatchers syncs unsynced watchers by: -// 1. choose a set of watchers from the unsynced watcher group -// 2. iterate over the set to get the minimum revision and remove compacted watchers -// 3. use minimum revision to get all key-value pairs and send those events to watchers -// 4. 
remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() int { - s.mu.Lock() - defer s.mu.Unlock() - - if s.unsynced.size() == 0 { - return 0 - } - - s.store.revMu.RLock() - defer s.store.revMu.RUnlock() - - // in order to find key-value pairs from unsynced watchers, we need to - // find min revision index, and these revisions can be used to - // query the backend store of key-value pairs - curRev := s.store.currentRev - compactionRev := s.store.compactMainRev - - wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) - minBytes, maxBytes := newRevBytes(), newRevBytes() - revToBytes(revision{main: minRev}, minBytes) - revToBytes(revision{main: curRev + 1}, maxBytes) - - // UnsafeRange returns keys and values. And in boltdb, keys are revisions. - // values are actual key-value pairs in backend. - tx := s.store.b.ReadTx() - tx.Lock() - revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) - evs := kvsToEvents(wg, revs, vs) - tx.Unlock() - - var victims watcherBatch - wb := newWatcherBatch(wg, evs) - for w := range wg.watchers { - w.minRev = curRev + 1 - - eb, ok := wb[w] - if !ok { - // bring un-notified watcher to synced - s.synced.add(w) - s.unsynced.delete(w) - continue - } - - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - if victims == nil { - victims = make(watcherBatch) - } - w.victim = true - } - - if w.victim { - victims[w] = eb - } else { - if eb.moreRev != 0 { - // stay unsynced; more to read - continue - } - s.synced.add(w) - } - s.unsynced.delete(w) - } - s.addVictim(victims) - - vsz := 0 - for _, v := range s.victims { - vsz += len(v) - } - slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) - - return s.unsynced.size() -} - -// kvsToEvents gets all events for the watchers from all key-value pairs -func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { - for i, v := range vals { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(v); err != nil { - plog.Panicf("cannot unmarshal event: %v", err) - } - - if !wg.contains(string(kv.Key)) { - continue - } - - ty := mvccpb.PUT - if isTombstone(revs[i]) { - ty = mvccpb.DELETE - // patch in mod revision so watchers won't skip - kv.ModRevision = bytesToRev(revs[i]).main - } - evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) - } - return evs -} - -// notify notifies the fact that given event at the given rev just happened to -// watchers that watch on the key of the event. 
-func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { - var victim watcherBatch - for w, eb := range newWatcherBatch(&s.synced, evs) { - if eb.revs != 1 { - plog.Panicf("unexpected multiple revisions in notification") - } - - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - // move slow watcher to victims - w.minRev = rev + 1 - if victim == nil { - victim = make(watcherBatch) - } - w.victim = true - victim[w] = eb - s.synced.delete(w) - slowWatcherGauge.Inc() - } - } - s.addVictim(victim) -} - -func (s *watchableStore) addVictim(victim watcherBatch) { - if victim == nil { - return - } - s.victims = append(s.victims, victim) - select { - case s.victimc <- struct{}{}: - default: - } -} - -func (s *watchableStore) rev() int64 { return s.store.Rev() } - -func (s *watchableStore) progress(w *watcher) { - s.mu.RLock() - defer s.mu.RUnlock() - - if _, ok := s.synced.watchers[w]; ok { - w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) - // If the ch is full, this watcher is receiving events. - // We do not need to send progress at all. - } -} - -type watcher struct { - // the watcher key - key []byte - // end indicates the end of the range to watch. - // If end is set, the watcher is on a range. - end []byte - - // victim is set when ch is blocked and undergoing victim processing - victim bool - - // compacted is set when the watcher is removed because of compaction - compacted bool - - // minRev is the minimum revision update the watcher will accept - minRev int64 - id WatchID - - fcs []FilterFunc - // a chan to send out the watch response. - // The chan might be shared with other watchers. - ch chan<- WatchResponse -} - -func (w *watcher) send(wr WatchResponse) bool { - progressEvent := len(wr.Events) == 0 - - if len(w.fcs) != 0 { - ne := make([]mvccpb.Event, 0, len(wr.Events)) - for i := range wr.Events { - filtered := false - for _, filter := range w.fcs { - if filter(wr.Events[i]) { - filtered = true - break - } - } - if !filtered { - ne = append(ne, wr.Events[i]) - } - } - wr.Events = ne - } - - // if all events are filtered out, we should send nothing. - if !progressEvent && len(wr.Events) == 0 { - return true - } - select { - case w.ch <- wr: - return true - default: - return false - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go deleted file mode 100644 index 5c5bfda13..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
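The victim machinery above exists because `watcher.send` must never block the store: it attempts the channel send inside a `select` with a `default` arm and reports failure instead of waiting. The pattern in isolation (generic Go; `trySend` is our name, not the deleted API):

```go
package main

import "fmt"

// trySend delivers v only if ch has room right now; otherwise it gives up
// immediately. In the deleted store, a failed send turns the watcher into
// a "victim" whose pending events are retried by syncVictimsLoop.
func trySend(ch chan<- int, v int) bool {
	select {
	case ch <- v:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(trySend(ch, 1)) // true: buffer has room
	fmt.Println(trySend(ch, 2)) // false: buffer full; caller queues a retry
}
```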
- -package mvcc - -import ( - "github.com/coreos/etcd/mvcc/mvccpb" -) - -func (tw *watchableStoreTxnWrite) End() { - changes := tw.Changes() - if len(changes) == 0 { - tw.TxnWrite.End() - return - } - - rev := tw.Rev() + 1 - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - evs[i].Kv = &changes[i] - if change.CreateRevision == 0 { - evs[i].Type = mvccpb.DELETE - evs[i].Kv.ModRevision = rev - } else { - evs[i].Type = mvccpb.PUT - } - } - - // end write txn under watchable store lock so the updates are visible - // when asynchronous event posting checks the current store revision - tw.s.mu.Lock() - tw.s.notify(rev, evs) - tw.TxnWrite.End() - tw.s.mu.Unlock() -} - -type watchableStoreTxnWrite struct { - TxnWrite - s *watchableStore -} - -func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go deleted file mode 100644 index 9468d4269..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watcher.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "errors" - "sync" - - "github.com/coreos/etcd/mvcc/mvccpb" -) - -var ( - ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") -) - -type WatchID int64 - -// FilterFunc returns true if the given event should be filtered out. -type FilterFunc func(e mvccpb.Event) bool - -type WatchStream interface { - // Watch creates a watcher. The watcher watches the events happening or - // happened on the given key or range [key, end) from the given startRev. - // - // The whole event history can be watched unless compacted. - // If `startRev` <=0, watch observes events after currentRev. - // - // The returned `id` is the ID of this watcher. It appears as WatchID - // in events that are sent to the created watcher through stream channel. - // - Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID - - // Chan returns a chan. All watch response will be sent to the returned chan. - Chan() <-chan WatchResponse - - // RequestProgress requests the progress of the watcher with given ID. The response - // will only be sent if the watcher is currently synced. - // The responses will be sent through the WatchRespone Chan attached - // with this stream to ensure correct ordering. - // The responses contains no events. The revision in the response is the progress - // of the watchers since the watcher is currently synced. - RequestProgress(id WatchID) - - // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be - // returned. - Cancel(id WatchID) error - - // Close closes Chan and release all related resources. - Close() - - // Rev returns the current revision of the KV the stream watches on. - Rev() int64 -} - -type WatchResponse struct { - // WatchID is the WatchID of the watcher this response sent to. 
- WatchID WatchID - - // Events contains all the events that needs to send. - Events []mvccpb.Event - - // Revision is the revision of the KV when the watchResponse is created. - // For a normal response, the revision should be the same as the last - // modified revision inside Events. For a delayed response to a unsynced - // watcher, the revision is greater than the last modified revision - // inside Events. - Revision int64 - - // CompactRevision is set when the watcher is cancelled due to compaction. - CompactRevision int64 -} - -// watchStream contains a collection of watchers that share -// one streaming chan to send out watched events and other control events. -type watchStream struct { - watchable watchable - ch chan WatchResponse - - mu sync.Mutex // guards fields below it - // nextID is the ID pre-allocated for next new watcher in this stream - nextID WatchID - closed bool - cancels map[WatchID]cancelFunc - watchers map[WatchID]*watcher -} - -// Watch creates a new watcher in the stream and returns its WatchID. -// TODO: return error if ws is closed? -func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID { - // prevent wrong range where key >= end lexicographically - // watch request with 'WithFromKey' has empty-byte range end - if len(end) != 0 && bytes.Compare(key, end) != -1 { - return -1 - } - - ws.mu.Lock() - defer ws.mu.Unlock() - if ws.closed { - return -1 - } - - id := ws.nextID - ws.nextID++ - - w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...) - - ws.cancels[id] = c - ws.watchers[id] = w - return id -} - -func (ws *watchStream) Chan() <-chan WatchResponse { - return ws.ch -} - -func (ws *watchStream) Cancel(id WatchID) error { - ws.mu.Lock() - cancel, ok := ws.cancels[id] - ok = ok && !ws.closed - if ok { - delete(ws.cancels, id) - delete(ws.watchers, id) - } - ws.mu.Unlock() - if !ok { - return ErrWatcherNotExist - } - cancel() - return nil -} - -func (ws *watchStream) Close() { - ws.mu.Lock() - defer ws.mu.Unlock() - - for _, cancel := range ws.cancels { - cancel() - } - ws.closed = true - close(ws.ch) - watchStreamGauge.Dec() -} - -func (ws *watchStream) Rev() int64 { - ws.mu.Lock() - defer ws.mu.Unlock() - return ws.watchable.rev() -} - -func (ws *watchStream) RequestProgress(id WatchID) { - ws.mu.Lock() - w, ok := ws.watchers[id] - ws.mu.Unlock() - if !ok { - return - } - ws.watchable.progress(w) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go deleted file mode 100644 index 6ef1d0ce8..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "math" - - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/adt" -) - -var ( - // watchBatchMaxRevs is the maximum distinct revisions that - // may be sent to an unsynced watcher at a time. 
Declared as - // var instead of const for testing purposes. - watchBatchMaxRevs = 1000 -) - -type eventBatch struct { - // evs is a batch of revision-ordered events - evs []mvccpb.Event - // revs is the minimum unique revisions observed for this batch - revs int - // moreRev is first revision with more events following this batch - moreRev int64 -} - -func (eb *eventBatch) add(ev mvccpb.Event) { - if eb.revs > watchBatchMaxRevs { - // maxed out batch size - return - } - - if len(eb.evs) == 0 { - // base case - eb.revs = 1 - eb.evs = append(eb.evs, ev) - return - } - - // revision accounting - ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision - evRev := ev.Kv.ModRevision - if evRev > ebRev { - eb.revs++ - if eb.revs > watchBatchMaxRevs { - eb.moreRev = evRev - return - } - } - - eb.evs = append(eb.evs, ev) -} - -type watcherBatch map[*watcher]*eventBatch - -func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) { - eb := wb[w] - if eb == nil { - eb = &eventBatch{} - wb[w] = eb - } - eb.add(ev) -} - -// newWatcherBatch maps watchers to their matched events. It enables quick -// events look up by watcher. -func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch { - if len(wg.watchers) == 0 { - return nil - } - - wb := make(watcherBatch) - for _, ev := range evs { - for w := range wg.watcherSetByKey(string(ev.Kv.Key)) { - if ev.Kv.ModRevision >= w.minRev { - // don't double notify - wb.add(w, ev) - } - } - } - return wb -} - -type watcherSet map[*watcher]struct{} - -func (w watcherSet) add(wa *watcher) { - if _, ok := w[wa]; ok { - panic("add watcher twice!") - } - w[wa] = struct{}{} -} - -func (w watcherSet) union(ws watcherSet) { - for wa := range ws { - w.add(wa) - } -} - -func (w watcherSet) delete(wa *watcher) { - if _, ok := w[wa]; !ok { - panic("removing missing watcher!") - } - delete(w, wa) -} - -type watcherSetByKey map[string]watcherSet - -func (w watcherSetByKey) add(wa *watcher) { - set := w[string(wa.key)] - if set == nil { - set = make(watcherSet) - w[string(wa.key)] = set - } - set.add(wa) -} - -func (w watcherSetByKey) delete(wa *watcher) bool { - k := string(wa.key) - if v, ok := w[k]; ok { - if _, ok := v[wa]; ok { - delete(v, wa) - if len(v) == 0 { - // remove the set; nothing left - delete(w, k) - } - return true - } - } - return false -} - -// watcherGroup is a collection of watchers organized by their ranges -type watcherGroup struct { - // keyWatchers has the watchers that watch on a single key - keyWatchers watcherSetByKey - // ranges has the watchers that watch a range; it is sorted by interval - ranges adt.IntervalTree - // watchers is the set of all watchers - watchers watcherSet -} - -func newWatcherGroup() watcherGroup { - return watcherGroup{ - keyWatchers: make(watcherSetByKey), - watchers: make(watcherSet), - } -} - -// add puts a watcher in the group. -func (wg *watcherGroup) add(wa *watcher) { - wg.watchers.add(wa) - if wa.end == nil { - wg.keyWatchers.add(wa) - return - } - - // interval already registered? - ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - if iv := wg.ranges.Find(ivl); iv != nil { - iv.Val.(watcherSet).add(wa) - return - } - - // not registered, put in interval tree - ws := make(watcherSet) - ws.add(wa) - wg.ranges.Insert(ivl, ws) -} - -// contains is whether the given key has a watcher in the group. 
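The `eventBatch` bookkeeping above is subtle: `revs` counts distinct revisions, not events, and once the cap is crossed `moreRev` remembers where the next batch must resume. A condensed, hypothetical model with a cap of 2 instead of `watchBatchMaxRevs = 1000`:

```go
package main

import "fmt"

// batch is a toy stand-in for eventBatch: revs holds the appended event
// revisions, seen counts distinct revisions, moreRev marks the resume point.
type batch struct {
	revs    []int64
	seen    int
	moreRev int64
}

func (b *batch) add(rev int64) {
	const maxRevs = 2
	if b.seen > maxRevs {
		return // batch already maxed out
	}
	if len(b.revs) == 0 {
		b.seen = 1
	} else if rev > b.revs[len(b.revs)-1] {
		b.seen++
		if b.seen > maxRevs {
			b.moreRev = rev // next batch resumes here
			return
		}
	}
	b.revs = append(b.revs, rev)
}

func main() {
	b := &batch{}
	for _, r := range []int64{5, 5, 6, 7} {
		b.add(r)
	}
	fmt.Println(b.revs, b.moreRev) // [5 5 6] 7
}
```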
-func (wg *watcherGroup) contains(key string) bool { - _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) -} - -// size gives the number of unique watchers in the group. -func (wg *watcherGroup) size() int { return len(wg.watchers) } - -// delete removes a watcher from the group. -func (wg *watcherGroup) delete(wa *watcher) bool { - if _, ok := wg.watchers[wa]; !ok { - return false - } - wg.watchers.delete(wa) - if wa.end == nil { - wg.keyWatchers.delete(wa) - return true - } - - ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - iv := wg.ranges.Find(ivl) - if iv == nil { - return false - } - - ws := iv.Val.(watcherSet) - delete(ws, wa) - if len(ws) == 0 { - // remove interval missing watchers - if ok := wg.ranges.Delete(ivl); !ok { - panic("could not remove watcher from interval tree") - } - } - - return true -} - -// choose selects watchers from the watcher group to update -func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) { - if len(wg.watchers) < maxWatchers { - return wg, wg.chooseAll(curRev, compactRev) - } - ret := newWatcherGroup() - for w := range wg.watchers { - if maxWatchers <= 0 { - break - } - maxWatchers-- - ret.add(w) - } - return &ret, ret.chooseAll(curRev, compactRev) -} - -func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 { - minRev := int64(math.MaxInt64) - for w := range wg.watchers { - if w.minRev > curRev { - panic("watcher current revision should not exceed current revision") - } - if w.minRev < compactRev { - select { - case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}: - w.compacted = true - wg.delete(w) - default: - // retry next time - } - continue - } - if minRev > w.minRev { - minRev = w.minRev - } - } - return minRev -} - -// watcherSetByKey gets the set of watchers that receive events on the given key. 
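Stripped of locking and channel delivery, `chooseAll` above is a single scan: watchers whose next revision predates the compaction point are notified and dropped, and the minimum revision of the rest bounds the backend read. A simplified sketch over plain slices (`minSyncRev` is our name, not the deleted API):

```go
package main

import (
	"fmt"
	"math"
)

// minSyncRev models chooseAll: skip watchers whose next revision was
// already compacted (in the real code they get a CompactRevision response
// and are removed), and return the smallest revision still to be synced.
func minSyncRev(minRevs []int64, compactRev int64) int64 {
	minRev := int64(math.MaxInt64)
	for _, r := range minRevs {
		if r < compactRev {
			continue // compacted away; watcher is cancelled, not synced
		}
		if r < minRev {
			minRev = r
		}
	}
	return minRev
}

func main() {
	fmt.Println(minSyncRev([]int64{3, 9, 12}, 5)) // 9: revision 3 is compacted
}
```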
-func (wg *watcherGroup) watcherSetByKey(key string) watcherSet { - wkeys := wg.keyWatchers[key] - wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key)) - - // zero-copy cases - switch { - case len(wranges) == 0: - // no need to merge ranges or copy; reuse single-key set - return wkeys - case len(wranges) == 0 && len(wkeys) == 0: - return nil - case len(wranges) == 1 && len(wkeys) == 0: - return wranges[0].Val.(watcherSet) - } - - // copy case - ret := make(watcherSet) - ret.union(wg.keyWatchers[key]) - for _, item := range wranges { - ret.union(item.Val.(watcherSet)) - } - return ret -} diff --git a/vendor/github.com/coreos/go-semver/example.go b/vendor/github.com/coreos/go-semver/example.go deleted file mode 100644 index fd2ee5af2..000000000 --- a/vendor/github.com/coreos/go-semver/example.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import ( - "fmt" - "github.com/coreos/go-semver/semver" - "os" -) - -func main() { - vA, err := semver.NewVersion(os.Args[1]) - if err != nil { - fmt.Println(err.Error()) - } - vB, err := semver.NewVersion(os.Args[2]) - if err != nil { - fmt.Println(err.Error()) - } - - fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB)) -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index bb6733231..c83641619 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,6 +1,6 @@ ISC License -Copyright (c) 2012-2013 Dave Collins +Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index d42a0bc4a..8a4a6589a 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index e47a4e795..1fe3cf3d5 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 14f02dc15..7c519ff47 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go index 9db83c66f..2e3d22f31 100644 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, 
copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go index 5be0c4060..aacaac6f1 100644 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -91,6 +91,15 @@ The following configuration options are available: which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index 95f9dbfe5..df1d582a7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index ecf3b80e2..c49875bac 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go index d8233f542..32c0e3388 100644 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/docker/cli/cli/cobra.go b/vendor/github.com/docker/cli/cli/cobra.go deleted file mode 100644 index 3a5691a8c..000000000 --- a/vendor/github.com/docker/cli/cli/cobra.go +++ /dev/null @@ -1,162 +0,0 @@ -package cli - -import ( - "fmt" - "strings" - - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -// SetupRootCommand sets default usage, help, and error handling for the -// root command. 
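Aside on the go-spew hunk above: `DisablePointerAddresses` and `DisableCapacities` are the two options introduced in v1.1.0, the version this diff pins in `Gopkg.lock`. A short usage sketch (ordinary API calls, nothing Traefik-specific):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// The two Disable* fields are the options added in go-spew v1.1.0,
	// as described in the doc.go hunk above.
	cfg := spew.ConfigState{
		Indent:                  " ",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}

	type point struct{ X, Y int }
	a, b := &point{1, 2}, &point{1, 2}

	// Without DisablePointerAddresses the two dumps would differ in the
	// printed heap addresses; with it, equal values dump identically,
	// which is what makes the output usable in test diffs.
	fmt.Println(cfg.Sdump(a) == cfg.Sdump(b)) // true
}
```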
-func SetupRootCommand(rootCmd *cobra.Command) { - cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) - cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) - cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) - cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) - cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) - cobra.AddTemplateFunc("useLine", UseLine) - - rootCmd.SetUsageTemplate(usageTemplate) - rootCmd.SetHelpTemplate(helpTemplate) - rootCmd.SetFlagErrorFunc(FlagErrorFunc) - rootCmd.SetHelpCommand(helpCommand) - - rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") - rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") - rootCmd.PersistentFlags().Lookup("help").Hidden = true -} - -// FlagErrorFunc prints an error message which matches the format of the -// docker/cli/cli error messages -func FlagErrorFunc(cmd *cobra.Command, err error) error { - if err == nil { - return nil - } - - usage := "" - if cmd.HasSubCommands() { - usage = "\n\n" + cmd.UsageString() - } - return StatusError{ - Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), - StatusCode: 125, - } -} - -var helpCommand = &cobra.Command{ - Use: "help [command]", - Short: "Help about the command", - PersistentPreRun: func(cmd *cobra.Command, args []string) {}, - PersistentPostRun: func(cmd *cobra.Command, args []string) {}, - RunE: func(c *cobra.Command, args []string) error { - cmd, args, e := c.Root().Find(args) - if cmd == nil || e != nil || len(args) > 0 { - return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) - } - - helpFunc := cmd.HelpFunc() - helpFunc(cmd, args) - return nil - }, -} - -func hasSubCommands(cmd *cobra.Command) bool { - return len(operationSubCommands(cmd)) > 0 -} - -func hasManagementSubCommands(cmd *cobra.Command) bool { - return len(managementSubCommands(cmd)) > 0 -} - -func operationSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} - for _, sub := range cmd.Commands() { - if sub.IsAvailableCommand() && !sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -func wrappedFlagUsages(cmd *cobra.Command) string { - width := 80 - if ws, err := term.GetWinsize(0); err == nil { - width = int(ws.Width) - } - return cmd.Flags().FlagUsagesWrapped(width - 1) -} - -func managementSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} - for _, sub := range cmd.Commands() { - if sub.IsAvailableCommand() && sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -// UseLine returns the usage line for a command. This implementation is different -// from the default Command.UseLine in that it does not add a `[flags]` to the -// end of the line. -func UseLine(cmd *cobra.Command) string { - if cmd.HasParent() { - return cmd.Parent().CommandPath() + " " + cmd.Use - } - return cmd.Use -} - -var usageTemplate = `Usage: - -{{- if not .HasSubCommands}} {{ useLine . }}{{end}} -{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} - -{{ .Short | trim }} - -{{- if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}} - -{{- end}} -{{- if .HasExample}} - -Examples: -{{ .Example }} - -{{- end}} -{{- if .HasFlags}} - -Options: -{{ wrappedFlagUsages . | trimRightSpace}} - -{{- end}} -{{- if hasManagementSubCommands . }} - -Management Commands: - -{{- range managementSubCommands . 
}} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} - -{{- end}} -{{- if hasSubCommands .}} - -Commands: - -{{- range operationSubCommands . }} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} -{{- end}} - -{{- if .HasSubCommands }} - -Run '{{.CommandPath}} COMMAND --help' for more information on a command. -{{- end}} -` - -var helpTemplate = ` -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go deleted file mode 100644 index ff4b78adc..000000000 --- a/vendor/github.com/docker/cli/cli/command/cli.go +++ /dev/null @@ -1,258 +0,0 @@ -package command - -import ( - "io" - "net" - "net/http" - "os" - "runtime" - "time" - - "github.com/docker/cli/cli" - cliconfig "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/config/configfile" - cliflags "github.com/docker/cli/cli/flags" - "github.com/docker/cli/cli/trust" - dopts "github.com/docker/cli/opts" - "github.com/docker/docker/api" - "github.com/docker/docker/client" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/theupdateframework/notary" - notaryclient "github.com/theupdateframework/notary/client" - "github.com/theupdateframework/notary/passphrase" - "golang.org/x/net/context" -) - -// Streams is an interface which exposes the standard input and output streams -type Streams interface { - In() *InStream - Out() *OutStream - Err() io.Writer -} - -// Cli represents the docker command line client. -type Cli interface { - Client() client.APIClient - Out() *OutStream - Err() io.Writer - In() *InStream - SetIn(in *InStream) - ConfigFile() *configfile.ConfigFile - ServerInfo() ServerInfo - NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) -} - -// DockerCli is an instance the docker command line client. -// Instances of the client can be returned from NewDockerCli. -type DockerCli struct { - configFile *configfile.ConfigFile - in *InStream - out *OutStream - err io.Writer - client client.APIClient - defaultVersion string - server ServerInfo -} - -// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. -func (cli *DockerCli) DefaultVersion() string { - return cli.defaultVersion -} - -// Client returns the APIClient -func (cli *DockerCli) Client() client.APIClient { - return cli.client -} - -// Out returns the writer used for stdout -func (cli *DockerCli) Out() *OutStream { - return cli.out -} - -// Err returns the writer used for stderr -func (cli *DockerCli) Err() io.Writer { - return cli.err -} - -// SetIn sets the reader used for stdin -func (cli *DockerCli) SetIn(in *InStream) { - cli.in = in -} - -// In returns the reader used for stdin -func (cli *DockerCli) In() *InStream { - return cli.in -} - -// ShowHelp shows the command help. -func ShowHelp(err io.Writer) func(*cobra.Command, []string) error { - return func(cmd *cobra.Command, args []string) error { - cmd.SetOutput(err) - cmd.HelpFunc()(cmd, args) - return nil - } -} - -// ConfigFile returns the ConfigFile -func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { - return cli.configFile -} - -// ServerInfo returns the server version details for the host this client is -// connected to -func (cli *DockerCli) ServerInfo() ServerInfo { - return cli.server -} - -// Initialize the dockerCli runs initialization that must happen after command -// line flags are parsed. 
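`SetupRootCommand` above drives all of the CLI's help output through Go templates, with `cobra.AddTemplateFunc` registering the helpers those templates call. The same mechanism in miniature (a hypothetical `demo` command, not Docker's CLI):

```go
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	// Register a helper the usage template can call, just as
	// SetupRootCommand does with hasSubCommands, wrappedFlagUsages, etc.
	cobra.AddTemplateFunc("upper", strings.ToUpper)

	rootCmd := &cobra.Command{
		Use:   "demo",
		Short: "toy command showing template customization",
		Run:   func(cmd *cobra.Command, args []string) {},
	}
	rootCmd.SetUsageTemplate("Usage: {{ upper .CommandPath }}\n")

	// Prints "Usage: DEMO" by executing the custom template.
	_ = rootCmd.Usage()
}
```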
-func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { - cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err) - - var err error - cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) - if tlsconfig.IsErrEncryptedKey(err) { - passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil) - newClient := func(password string) (client.APIClient, error) { - opts.Common.TLSOptions.Passphrase = password - return NewAPIClientFromFlags(opts.Common, cli.configFile) - } - cli.client, err = getClientWithPassword(passRetriever, newClient) - } - if err != nil { - return err - } - cli.initializeFromClient() - return nil -} - -func (cli *DockerCli) initializeFromClient() { - cli.defaultVersion = cli.client.ClientVersion() - - ping, err := cli.client.Ping(context.Background()) - if err != nil { - // Default to true if we fail to connect to daemon - cli.server = ServerInfo{HasExperimental: true} - - if ping.APIVersion != "" { - cli.client.NegotiateAPIVersionPing(ping) - } - return - } - - cli.server = ServerInfo{ - HasExperimental: ping.Experimental, - OSType: ping.OSType, - } - cli.client.NegotiateAPIVersionPing(ping) -} - -func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) { - for attempts := 0; ; attempts++ { - passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts) - if giveup || err != nil { - return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase") - } - - apiclient, err := newClient(passwd) - if !tlsconfig.IsErrEncryptedKey(err) { - return apiclient, err - } - } -} - -// NotaryClient provides a Notary Repository to interact with signed metadata for an image -func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) { - return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...) -} - -// ServerInfo stores details about the supported features and platform of the -// server -type ServerInfo struct { - HasExperimental bool - OSType string -} - -// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. 
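`initializeFromClient` above is deliberately forgiving: it pings the daemon, negotiates the API version from the ping when it can, and assumes an experimental server if the ping fails outright. The negotiation step on its own, against the same client package (constructing from the environment is our shortcut here; the deleted code builds the client from CLI flags):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Builds the client from DOCKER_HOST and friends; the deleted code
	// derives the same thing from flags, but the negotiation is identical.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ping, err := cli.Ping(context.Background())
	if err == nil {
		// Downgrade the client's API version to what the daemon supports.
		cli.NegotiateAPIVersionPing(ping)
	}
	fmt.Println("using API version:", cli.ClientVersion())
}
```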
-func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { - return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} -} - -// NewAPIClientFromFlags creates a new APIClient from command line flags -func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { - host, err := getServerHost(opts.Hosts, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - customHeaders := configFile.HTTPHeaders - if customHeaders == nil { - customHeaders = map[string]string{} - } - customHeaders["User-Agent"] = UserAgent() - - verStr := api.DefaultVersion - if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { - verStr = tmpStr - } - - httpClient, err := newHTTPClient(host, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - return client.NewClient(host, verStr, httpClient, customHeaders) -} - -func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) { - var host string - switch len(hosts) { - case 0: - host = os.Getenv("DOCKER_HOST") - case 1: - host = hosts[0] - default: - return "", errors.New("Please specify only one -H") - } - - return dopts.ParseHost(tlsOptions != nil, host) -} - -func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { - if tlsOptions == nil { - // let the api client configure the default transport. - return nil, nil - } - opts := *tlsOptions - opts.ExclusiveRootPools = true - config, err := tlsconfig.Client(opts) - if err != nil { - return nil, err - } - tr := &http.Transport{ - TLSClientConfig: config, - DialContext: (&net.Dialer{ - KeepAlive: 30 * time.Second, - Timeout: 30 * time.Second, - }).DialContext, - } - proto, addr, _, err := client.ParseHost(host) - if err != nil { - return nil, err - } - - sockets.ConfigureTransport(tr, proto, addr) - - return &http.Client{ - Transport: tr, - CheckRedirect: client.CheckRedirect, - }, nil -} - -// UserAgent returns the user agent string used for making API requests -func UserAgent() string { - return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")" -} diff --git a/vendor/github.com/docker/cli/cli/command/events_utils.go b/vendor/github.com/docker/cli/cli/command/events_utils.go deleted file mode 100644 index 16d76892a..000000000 --- a/vendor/github.com/docker/cli/cli/command/events_utils.go +++ /dev/null @@ -1,47 +0,0 @@ -package command - -import ( - "sync" - - eventtypes "github.com/docker/docker/api/types/events" - "github.com/sirupsen/logrus" -) - -// EventHandler is abstract interface for user to customize -// own handle functions of each type of events -type EventHandler interface { - Handle(action string, h func(eventtypes.Message)) - Watch(c <-chan eventtypes.Message) -} - -// InitEventHandler initializes and returns an EventHandler -func InitEventHandler() EventHandler { - return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} -} - -type eventHandler struct { - handlers map[string]func(eventtypes.Message) - mu sync.Mutex -} - -func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { - w.mu.Lock() - w.handlers[action] = h - w.mu.Unlock() -} - -// Watch ranges over the passed in event chan and processes the events based on the -// handlers created for a given action. -// To stop watching, close the event chan. 
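All `newHTTPClient` above really does is swap in a TLS-aware transport with explicit dial timeouts before the Docker API client takes over. Its core, reduced to the standard library (`newTLSHTTPClient` is our name):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"time"
)

// newTLSHTTPClient builds an http.Client with the same shape as the
// deleted helper: a TLS config plus bounded dial/keep-alive timeouts.
func newTLSHTTPClient(cfg *tls.Config) *http.Client {
	tr := &http.Transport{
		TLSClientConfig: cfg,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
	}
	return &http.Client{Transport: tr}
}

func main() {
	c := newTLSHTTPClient(&tls.Config{MinVersion: tls.VersionTLS12})
	fmt.Printf("transport ready: %T\n", c.Transport)
}
```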
-func (w *eventHandler) Watch(c <-chan eventtypes.Message) { - for e := range c { - w.mu.Lock() - h, exists := w.handlers[e.Action] - w.mu.Unlock() - if !exists { - continue - } - logrus.Debugf("event handler: received event: %v", e) - go h(e) - } -} diff --git a/vendor/github.com/docker/cli/cli/command/image/build.go b/vendor/github.com/docker/cli/cli/command/image/build.go deleted file mode 100644 index 637068a80..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/build.go +++ /dev/null @@ -1,589 +0,0 @@ -package image - -import ( - "archive/tar" - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "runtime" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/image/build" - "github.com/docker/cli/opts" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/urlutil" - units "github.com/docker/go-units" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type buildOptions struct { - context string - dockerfileName string - tags opts.ListOpts - labels opts.ListOpts - buildArgs opts.ListOpts - extraHosts opts.ListOpts - ulimits *opts.UlimitOpt - memory opts.MemBytes - memorySwap opts.MemSwapBytes - shmSize opts.MemBytes - cpuShares int64 - cpuPeriod int64 - cpuQuota int64 - cpuSetCpus string - cpuSetMems string - cgroupParent string - isolation string - quiet bool - noCache bool - rm bool - forceRm bool - pull bool - cacheFrom []string - compress bool - securityOpt []string - networkMode string - squash bool - target string - imageIDFile string - stream bool - platform string -} - -// dockerfileFromStdin returns true when the user specified that the Dockerfile -// should be read from stdin instead of a file -func (o buildOptions) dockerfileFromStdin() bool { - return o.dockerfileName == "-" -} - -// contextFromStdin returns true when the user specified that the build context -// should be read from stdin -func (o buildOptions) contextFromStdin() bool { - return o.context == "-" -} - -func newBuildOptions() buildOptions { - ulimits := make(map[string]*units.Ulimit) - return buildOptions{ - tags: opts.NewListOpts(validateTag), - buildArgs: opts.NewListOpts(opts.ValidateEnv), - ulimits: opts.NewUlimitOpt(&ulimits), - labels: opts.NewListOpts(opts.ValidateEnv), - extraHosts: opts.NewListOpts(opts.ValidateExtraHost), - } -} - -// NewBuildCommand creates a new `docker build` command -func NewBuildCommand(dockerCli command.Cli) *cobra.Command { - options := newBuildOptions() - - cmd := &cobra.Command{ - Use: "build [OPTIONS] PATH | URL | -", - Short: "Build an image from a Dockerfile", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.context = args[0] - return runBuild(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") - flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") - flags.Var(options.ulimits, "ulimit", "Ulimit options") - flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default 
is 'PATH/Dockerfile')") - flags.VarP(&options.memory, "memory", "m", "Memory limit") - flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm") - flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") - flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") - flags.Var(&options.labels, "label", "Set metadata for an image") - flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") - flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") - flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") - flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") - flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") - flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") - flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") - flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") - flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") - flags.SetAnnotation("network", "version", []string{"1.25"}) - flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") - flags.StringVar(&options.target, "target", "", "Set the target build stage to build.") - flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file") - - command.AddTrustVerificationFlags(flags) - command.AddPlatformFlag(flags, &options.platform) - - flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") - flags.SetAnnotation("squash", "experimental", nil) - flags.SetAnnotation("squash", "version", []string{"1.25"}) - - flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context") - flags.SetAnnotation("stream", "experimental", nil) - flags.SetAnnotation("stream", "version", []string{"1.31"}) - - return cmd -} - -// lastProgressOutput is the same as progress.Output except -// that it only output with the last update. It is used in -// non terminal scenarios to suppress verbose messages -type lastProgressOutput struct { - output progress.Output -} - -// WriteProgress formats progress information from a ProgressReader. 
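Every flag in `NewBuildCommand` above binds straight onto a `buildOptions` field, so `runBuild` receives one fully populated struct instead of reading global state. The pattern in miniature, using the same pflag package cobra wraps underneath (a hypothetical two-field struct):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

// buildOpts is a toy stand-in for buildOptions: each flag writes directly
// into a struct field via the *VarP helpers.
type buildOpts struct {
	file  string
	quiet bool
}

func main() {
	var o buildOpts
	flag.StringVarP(&o.file, "file", "f", "", "Name of the Dockerfile")
	flag.BoolVarP(&o.quiet, "quiet", "q", false, "Suppress the build output")
	flag.Parse()
	fmt.Printf("%+v\n", o)
}
```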
-func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { - if !prog.LastUpdate { - return nil - } - - return out.output.WriteProgress(prog) -} - -// nolint: gocyclo -func runBuild(dockerCli command.Cli, options buildOptions) error { - var ( - buildCtx io.ReadCloser - dockerfileCtx io.ReadCloser - err error - contextDir string - tempDir string - relDockerfile string - progBuff io.Writer - buildBuff io.Writer - remote string - ) - - if options.dockerfileFromStdin() { - if options.contextFromStdin() { - return errors.New("invalid argument: can't use stdin for both build context and dockerfile") - } - dockerfileCtx = dockerCli.In() - } - - specifiedContext := options.context - progBuff = dockerCli.Out() - buildBuff = dockerCli.Out() - if options.quiet { - progBuff = bytes.NewBuffer(nil) - buildBuff = bytes.NewBuffer(nil) - } - if options.imageIDFile != "" { - // Avoid leaving a stale file if we eventually fail - if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "Removing image ID file") - } - } - - switch { - case options.contextFromStdin(): - // buildCtx is tar archive. if stdin was dockerfile then it is wrapped - buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName) - case isLocalDir(specifiedContext): - contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName) - case urlutil.IsGitURL(specifiedContext): - tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName) - case urlutil.IsURL(specifiedContext): - buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) - default: - return errors.Errorf("unable to prepare context: path %q not found", specifiedContext) - } - - if err != nil { - if options.quiet && urlutil.IsURL(specifiedContext) { - fmt.Fprintln(dockerCli.Err(), progBuff) - } - return errors.Errorf("unable to prepare context: %s", err) - } - - if tempDir != "" { - defer os.RemoveAll(tempDir) - contextDir = tempDir - } - - // read from a directory into tar archive - if buildCtx == nil && !options.stream { - excludes, err := build.ReadDockerignore(contextDir) - if err != nil { - return err - } - - if err := build.ValidateContextDirectory(contextDir, excludes); err != nil { - return errors.Errorf("error checking context: '%s'.", err) - } - - // And canonicalize dockerfile name to a platform-independent one - relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) - if err != nil { - return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) - } - - excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin()) - buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ - ExcludePatterns: excludes, - ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, - }) - if err != nil { - return err - } - } - - // replace Dockerfile if it was added from stdin and there is archive context - if dockerfileCtx != nil && buildCtx != nil { - buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx) - if err != nil { - return err - } - } - - // if streaming and dockerfile was not from stdin then read from file - // to the same reader that is usually stdin - if options.stream && dockerfileCtx == nil { - dockerfileCtx, err = os.Open(relDockerfile) - if err != nil { - return errors.Wrapf(err, "failed to open %s", relDockerfile) - } - defer 
dockerfileCtx.Close() - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var resolvedTags []*resolvedTag - if command.IsTrusted() { - translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { - return TrustedReference(ctx, dockerCli, ref, nil) - } - // if there is a tar wrapper, the dockerfile needs to be replaced inside it - if buildCtx != nil { - // Wrap the tar archive to replace the Dockerfile entry with the rewritten - // Dockerfile which uses trusted pulls. - buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags) - } else if dockerfileCtx != nil { - // if there was not archive context still do the possible replacements in Dockerfile - newDockerfile, _, err := rewriteDockerfileFrom(ctx, dockerfileCtx, translator) - if err != nil { - return err - } - dockerfileCtx = ioutil.NopCloser(bytes.NewBuffer(newDockerfile)) - } - } - - if options.compress { - buildCtx, err = build.Compress(buildCtx) - if err != nil { - return err - } - } - - // Setup an upload progress bar - progressOutput := streamformatter.NewProgressOutput(progBuff) - if !dockerCli.Out().IsTerminal() { - progressOutput = &lastProgressOutput{output: progressOutput} - } - - // if up to this point nothing has set the context then we must have another - // way for sending it(streaming) and set the context to the Dockerfile - if dockerfileCtx != nil && buildCtx == nil { - buildCtx = dockerfileCtx - } - - s, err := trySession(dockerCli, contextDir) - if err != nil { - return err - } - - var body io.Reader - if buildCtx != nil && !options.stream { - body = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") - } - - // add context stream to the session - if options.stream && s != nil { - syncDone := make(chan error) // used to signal first progress reporting completed. 
- // progress would also send errors but don't need it here as errors - // are handled by session.Run() and ImageBuild() - if err := addDirToSession(s, contextDir, progressOutput, syncDone); err != nil { - return err - } - - buf := newBufferedWriter(syncDone, buildBuff) - defer func() { - select { - case <-buf.flushed: - case <-ctx.Done(): - } - }() - buildBuff = buf - - remote = clientSessionRemote - body = buildCtx - } - - configFile := dockerCli.ConfigFile() - authConfigs, _ := configFile.GetAllCredentials() - buildOptions := types.ImageBuildOptions{ - Memory: options.memory.Value(), - MemorySwap: options.memorySwap.Value(), - Tags: options.tags.GetAll(), - SuppressOutput: options.quiet, - NoCache: options.noCache, - Remove: options.rm, - ForceRemove: options.forceRm, - PullParent: options.pull, - Isolation: container.Isolation(options.isolation), - CPUSetCPUs: options.cpuSetCpus, - CPUSetMems: options.cpuSetMems, - CPUShares: options.cpuShares, - CPUQuota: options.cpuQuota, - CPUPeriod: options.cpuPeriod, - CgroupParent: options.cgroupParent, - Dockerfile: relDockerfile, - ShmSize: options.shmSize.Value(), - Ulimits: options.ulimits.GetList(), - BuildArgs: configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()), - AuthConfigs: authConfigs, - Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()), - CacheFrom: options.cacheFrom, - SecurityOpt: options.securityOpt, - NetworkMode: options.networkMode, - Squash: options.squash, - ExtraHosts: options.extraHosts.GetAll(), - Target: options.target, - RemoteContext: remote, - Platform: options.platform, - } - - if s != nil { - go func() { - logrus.Debugf("running session: %v", s.ID()) - if err := s.Run(ctx, dockerCli.Client().DialSession); err != nil { - logrus.Error(err) - cancel() // cancel progress context - } - }() - buildOptions.SessionID = s.ID() - } - - response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) - if err != nil { - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s", progBuff) - } - cancel() - return err - } - defer response.Body.Close() - - imageID := "" - aux := func(auxJSON *json.RawMessage) { - var result types.BuildResult - if err := json.Unmarshal(*auxJSON, &result); err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err) - } else { - imageID = result.ID - } - } - - err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux) - if err != nil { - if jerr, ok := err.(*jsonmessage.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) - } - return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - return err - } - - // Windows: show error message about modified file permissions if the - // daemon isn't running Windows. - if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { - fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+ - "image from Windows against a non-Windows Docker host. All files and "+ - "directories added to build context will have '-rwxr-xr-x' permissions. "+ - "It is recommended to double check and reset permissions for sensitive "+ - "files and directories.") - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. 
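The `aux` callback above is how the image ID escapes the progress stream: the daemon attaches a `BuildResult` as a raw JSON side message, decoded separately from the normal build output. A self-contained model of that decode step (the payload below is made up; the real type is `types.BuildResult`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// buildResult mirrors the one field of types.BuildResult the callback
// actually uses.
type buildResult struct {
	ID string `json:"ID"`
}

func main() {
	// Hypothetical aux payload as the daemon would stream it.
	raw := json.RawMessage(`{"ID":"sha256:deadbeef"}`)

	var res buildResult
	if err := json.Unmarshal(raw, &res); err != nil {
		panic(err)
	}
	fmt.Println("image ID:", res.ID)
}
```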
- if options.quiet { - imageID = fmt.Sprintf("%s", buildBuff) - fmt.Fprintf(dockerCli.Out(), imageID) - } - - if options.imageIDFile != "" { - if imageID == "" { - return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile) - } - if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil { - return err - } - } - if command.IsTrusted() { - // Since the build was successful, now we must tag any of the resolved - // images from the above Dockerfile rewrite. - for _, resolved := range resolvedTags { - if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { - return err - } - } - } - - return nil -} - -func isLocalDir(c string) bool { - _, err := os.Stat(c) - return err == nil -} - -type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) - -// validateTag checks if the given image name can be resolved. -func validateTag(rawRepo string) (string, error) { - _, err := reference.ParseNormalizedNamed(rawRepo) - if err != nil { - return "", err - } - - return rawRepo, nil -} - -var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`) - -// resolvedTag records the repository, tag, and resolved digest reference -// from a Dockerfile rewrite. -type resolvedTag struct { - digestRef reference.Canonical - tagRef reference.NamedTagged -} - -// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in -// "FROM <image>" instructions to a digest reference. `translator` is a -// function that takes a repository name and tag reference and returns a -// trusted digest reference. -func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { - scanner := bufio.NewScanner(dockerfile) - buf := bytes.NewBuffer(nil) - - // Scan the lines of the Dockerfile, looking for a "FROM" line. - for scanner.Scan() { - line := scanner.Text() - - matches := dockerfileFromLinePattern.FindStringSubmatch(line) - if matches != nil && matches[1] != api.NoBaseImageSpecifier { - // Replace the line with a resolved "FROM repo@digest" - var ref reference.Named - ref, err = reference.ParseNormalizedNamed(matches[1]) - if err != nil { - return nil, nil, err - } - ref = reference.TagNameOnly(ref) - if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { - trustedRef, err := translator(ctx, ref) - if err != nil { - return nil, nil, err - } - - line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef))) - resolvedTags = append(resolvedTags, &resolvedTag{ - digestRef: trustedRef, - tagRef: ref, - }) - } - } - - _, err := fmt.Fprintln(buf, line) - if err != nil { - return nil, nil, err - } - } - - return buf.Bytes(), resolvedTags, scanner.Err() -} - -// replaceDockerfileTarWrapper wraps the given input tar archive stream and -// replaces the entry with the given Dockerfile name with the contents of the -// new Dockerfile. Returns a new tar archive stream with the replaced -// Dockerfile. 
-func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - - defer inputTarStream.Close() - - for { - hdr, err := tarReader.Next() - if err == io.EOF { - // Signals end of archive. - tarWriter.Close() - pipeWriter.Close() - return - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - content := io.Reader(tarReader) - if hdr.Name == dockerfileName { - // This entry is the Dockerfile. Since the tar archive was - // generated from a directory on the local filesystem, the - // Dockerfile will only appear once in the archive. - var newDockerfile []byte - newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) - if err != nil { - pipeWriter.CloseWithError(err) - return - } - hdr.Size = int64(len(newDockerfile)) - content = bytes.NewBuffer(newDockerfile) - } - - if err := tarWriter.WriteHeader(hdr); err != nil { - pipeWriter.CloseWithError(err) - return - } - - if _, err := io.Copy(tarWriter, content); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - }() - - return pipeReader -} diff --git a/vendor/github.com/docker/cli/cli/command/image/build_session.go b/vendor/github.com/docker/cli/cli/command/image/build_session.go deleted file mode 100644 index 897d237cd..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/build_session.go +++ /dev/null @@ -1,153 +0,0 @@ -package image - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/image/build" - cliconfig "github.com/docker/cli/cli/config" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/pkg/progress" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/filesync" - "github.com/pkg/errors" - "golang.org/x/time/rate" -) - -const clientSessionRemote = "client-session" - -func isSessionSupported(dockerCli command.Cli) bool { - return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31") -} - -func trySession(dockerCli command.Cli, contextDir string) (*session.Session, error) { - var s *session.Session - if isSessionSupported(dockerCli) { - sharedKey, err := getBuildSharedKey(contextDir) - if err != nil { - return nil, errors.Wrap(err, "failed to get build shared key") - } - s, err = session.NewSession(filepath.Base(contextDir), sharedKey) - if err != nil { - return nil, errors.Wrap(err, "failed to create session") - } - } - return s, nil -} - -func addDirToSession(session *session.Session, contextDir string, progressOutput progress.Output, done chan error) error { - excludes, err := build.ReadDockerignore(contextDir) - if err != nil { - return err - } - - p := &sizeProgress{out: progressOutput, action: "Streaming build context to Docker daemon"} - - workdirProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{ - {Dir: contextDir, Excludes: excludes}, - }) - session.Allow(workdirProvider) - - // this will be replaced on parallel build jobs. 
keep the current - // progressbar for now - if snpc, ok := workdirProvider.(interface { - SetNextProgressCallback(func(int, bool), chan error) - }); ok { - snpc.SetNextProgressCallback(p.update, done) - } - - return nil -} - -type sizeProgress struct { - out progress.Output - action string - limiter *rate.Limiter -} - -func (sp *sizeProgress) update(size int, last bool) { - if sp.limiter == nil { - sp.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1) - } - if last || sp.limiter.Allow() { - sp.out.WriteProgress(progress.Progress{Action: sp.action, Current: int64(size), LastUpdate: last}) - } -} - -type bufferedWriter struct { - done chan error - io.Writer - buf *bytes.Buffer - flushed chan struct{} - mu sync.Mutex -} - -func newBufferedWriter(done chan error, w io.Writer) *bufferedWriter { - bw := &bufferedWriter{done: done, Writer: w, buf: new(bytes.Buffer), flushed: make(chan struct{})} - go func() { - <-done - bw.flushBuffer() - }() - return bw -} - -func (bw *bufferedWriter) Write(dt []byte) (int, error) { - select { - case <-bw.done: - bw.flushBuffer() - return bw.Writer.Write(dt) - default: - return bw.buf.Write(dt) - } -} - -func (bw *bufferedWriter) flushBuffer() { - bw.mu.Lock() - select { - case <-bw.flushed: - default: - bw.Writer.Write(bw.buf.Bytes()) - close(bw.flushed) - } - bw.mu.Unlock() -} - -func getBuildSharedKey(dir string) (string, error) { - // build session is hash of build dir with node based randomness - s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", tryNodeIdentifier(), dir))) - return hex.EncodeToString(s[:]), nil -} - -func tryNodeIdentifier() string { - out := cliconfig.Dir() // return config dir as default on permission error - if err := os.MkdirAll(cliconfig.Dir(), 0700); err == nil { - sessionFile := filepath.Join(cliconfig.Dir(), ".buildNodeID") - if _, err := os.Lstat(sessionFile); err != nil { - if os.IsNotExist(err) { // create a new file with stored randomness - b := make([]byte, 32) - if _, err := rand.Read(b); err != nil { - return out - } - if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil { - return out - } - } - } - - dt, err := ioutil.ReadFile(sessionFile) - if err == nil { - return string(dt) - } - } - return out -} diff --git a/vendor/github.com/docker/cli/cli/command/image/cmd.go b/vendor/github.com/docker/cli/cli/command/image/cmd.go deleted file mode 100644 index a12bf3395..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/cmd.go +++ /dev/null @@ -1,33 +0,0 @@ -package image - -import ( - "github.com/spf13/cobra" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" -) - -// NewImageCommand returns a cobra command for `image` subcommands -func NewImageCommand(dockerCli command.Cli) *cobra.Command { - cmd := &cobra.Command{ - Use: "image", - Short: "Manage images", - Args: cli.NoArgs, - RunE: command.ShowHelp(dockerCli.Err()), - } - cmd.AddCommand( - NewBuildCommand(dockerCli), - NewHistoryCommand(dockerCli), - NewImportCommand(dockerCli), - NewLoadCommand(dockerCli), - NewPullCommand(dockerCli), - NewPushCommand(dockerCli), - NewSaveCommand(dockerCli), - NewTagCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newInspectCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/cli/cli/command/image/history.go b/vendor/github.com/docker/cli/cli/command/image/history.go deleted file mode 100644 index 27782d107..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/history.go 
+++ /dev/null @@ -1,64 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/formatter" - "github.com/spf13/cobra" -) - -type historyOptions struct { - image string - - human bool - quiet bool - noTrunc bool - format string -} - -// NewHistoryCommand creates a new `docker history` command -func NewHistoryCommand(dockerCli command.Cli) *cobra.Command { - var opts historyOptions - - cmd := &cobra.Command{ - Use: "history [OPTIONS] IMAGE", - Short: "Show the history of an image", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - return runHistory(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") - - return cmd -} - -func runHistory(dockerCli command.Cli, opts historyOptions) error { - ctx := context.Background() - - history, err := dockerCli.Client().ImageHistory(ctx, opts.image) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - format = formatter.TableFormatKey - } - - historyCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewHistoryFormat(format, opts.quiet, opts.human), - Trunc: !opts.noTrunc, - } - return formatter.HistoryWrite(historyCtx, opts.human, history) -} diff --git a/vendor/github.com/docker/cli/cli/command/image/import.go b/vendor/github.com/docker/cli/cli/command/image/import.go deleted file mode 100644 index 1f7189a95..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/import.go +++ /dev/null @@ -1,87 +0,0 @@ -package image - -import ( - "io" - "os" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - dockeropts "github.com/docker/cli/opts" - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/urlutil" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type importOptions struct { - source string - reference string - changes dockeropts.ListOpts - message string -} - -// NewImportCommand creates a new `docker import` command -func NewImportCommand(dockerCli command.Cli) *cobra.Command { - var options importOptions - - cmd := &cobra.Command{ - Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", - Short: "Import the contents from a tarball to create a filesystem image", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.source = args[0] - if len(args) > 1 { - options.reference = args[1] - } - return runImport(dockerCli, options) - }, - } - - flags := cmd.Flags() - - options.changes = dockeropts.NewListOpts(nil) - flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image") - flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image") - - return cmd -} - -func runImport(dockerCli command.Cli, options importOptions) error { - var ( - in io.Reader - srcName = options.source - ) - - if options.source == "-" { - in = dockerCli.In() - } else if !urlutil.IsURL(options.source) { - srcName = "-" - file, err := os.Open(options.source) - if err != nil { - return err - } - defer file.Close() - in = file 
- } - - source := types.ImageImportSource{ - Source: in, - SourceName: srcName, - } - - importOptions := types.ImageImportOptions{ - Message: options.message, - Changes: options.changes.GetAll(), - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ImageImport(context.Background(), source, options.reference, importOptions) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git a/vendor/github.com/docker/cli/cli/command/image/inspect.go b/vendor/github.com/docker/cli/cli/command/image/inspect.go deleted file mode 100644 index a510e3076..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/inspect.go +++ /dev/null @@ -1,44 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/inspect" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - refs []string -} - -// newInspectCommand creates a new cobra.Command for `docker image inspect` -func newInspectCommand(dockerCli command.Cli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] IMAGE [IMAGE...]", - Short: "Display detailed information on one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - return cmd -} - -func runInspect(dockerCli command.Cli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - getRefFunc := func(ref string) (interface{}, []byte, error) { - return client.ImageInspectWithRaw(ctx, ref) - } - return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) -} diff --git a/vendor/github.com/docker/cli/cli/command/image/list.go b/vendor/github.com/docker/cli/cli/command/image/list.go deleted file mode 100644 index 6dada8252..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/list.go +++ /dev/null @@ -1,95 +0,0 @@ -package image - -import ( - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/formatter" - "github.com/docker/cli/opts" - "github.com/docker/docker/api/types" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type imagesOptions struct { - matchName string - - quiet bool - all bool - noTrunc bool - showDigests bool - format string - filter opts.FilterOpt -} - -// NewImagesCommand creates a new `docker images` command -func NewImagesCommand(dockerCli command.Cli) *cobra.Command { - options := imagesOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "images [OPTIONS] [REPOSITORY[:TAG]]", - Short: "List images", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - options.matchName = args[0] - } - return runImages(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVarP(&options.all, "all", "a", false, "Show all images (default hides intermediate images)") - flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVar(&options.showDigests, "digests", false, "Show digests") - flags.StringVar(&options.format, "format", "", "Pretty-print 
images using a Go template") - flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func newListCommand(dockerCli command.Cli) *cobra.Command { - cmd := *NewImagesCommand(dockerCli) - cmd.Aliases = []string{"images", "list"} - cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" - return &cmd -} - -func runImages(dockerCli command.Cli, options imagesOptions) error { - ctx := context.Background() - - filters := options.filter.Value() - if options.matchName != "" { - filters.Add("reference", options.matchName) - } - - listOptions := types.ImageListOptions{ - All: options.all, - Filters: filters, - } - - images, err := dockerCli.Client().ImageList(ctx, listOptions) - if err != nil { - return err - } - - format := options.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !options.quiet { - format = dockerCli.ConfigFile().ImagesFormat - } else { - format = formatter.TableFormatKey - } - } - - imageCtx := formatter.ImageContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewImageFormat(format, options.quiet, options.showDigests), - Trunc: !options.noTrunc, - }, - Digest: options.showDigests, - } - return formatter.ImageWrite(imageCtx, images) -} diff --git a/vendor/github.com/docker/cli/cli/command/image/load.go b/vendor/github.com/docker/cli/cli/command/image/load.go deleted file mode 100644 index 6708599fd..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/load.go +++ /dev/null @@ -1,77 +0,0 @@ -package image - -import ( - "io" - - "golang.org/x/net/context" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type loadOptions struct { - input string - quiet bool -} - -// NewLoadCommand creates a new `docker load` command -func NewLoadCommand(dockerCli command.Cli) *cobra.Command { - var opts loadOptions - - cmd := &cobra.Command{ - Use: "load [OPTIONS]", - Short: "Load an image from a tar archive or STDIN", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLoad(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") - - return cmd -} - -func runLoad(dockerCli command.Cli, opts loadOptions) error { - - var input io.Reader = dockerCli.In() - if opts.input != "" { - // We use system.OpenSequential to use sequential file access on Windows, avoiding - // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(opts.input) - if err != nil { - return err - } - defer file.Close() - input = file - } - - // To avoid getting stuck, verify that a tar file is given either in - // the input flag or through stdin and if not display an error message and exit. 
- if opts.input == "" && dockerCli.In().IsTerminal() { - return errors.Errorf("requested load from stdin, but stdin is empty") - } - - if !dockerCli.Out().IsTerminal() { - opts.quiet = true - } - response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) - if err != nil { - return err - } - defer response.Body.Close() - - if response.Body != nil && response.JSON { - return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) - } - - _, err = io.Copy(dockerCli.Out(), response.Body) - return err -} diff --git a/vendor/github.com/docker/cli/cli/command/image/prune.go b/vendor/github.com/docker/cli/cli/command/image/prune.go deleted file mode 100644 index 41590e4ec..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/prune.go +++ /dev/null @@ -1,95 +0,0 @@ -package image - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/cli/opts" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool - all bool - filter opts.FilterOpt -} - -// NewPruneCommand returns a new cobra prune command for images -func NewPruneCommand(dockerCli command.Cli) *cobra.Command { - options := pruneOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove unused images", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - spaceReclaimed, output, err := runPrune(dockerCli, options) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - return nil - }, - Annotations: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") - flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones") - flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") - - return cmd -} - -const ( - allImageWarning = `WARNING! This will remove all images without at least one container associated to them. -Are you sure you want to continue?` - danglingWarning = `WARNING! This will remove all dangling images. 
-Are you sure you want to continue?` -) - -func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) { - pruneFilters := options.filter.Value() - pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all)) - pruneFilters = command.PruneFilters(dockerCli, pruneFilters) - - warning := danglingWarning - if options.all { - warning = allImageWarning - } - if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return 0, "", nil - } - - report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) - if err != nil { - return 0, "", err - } - - if len(report.ImagesDeleted) > 0 { - output = "Deleted Images:\n" - for _, st := range report.ImagesDeleted { - if st.Untagged != "" { - output += fmt.Sprintln("untagged:", st.Untagged) - } else { - output += fmt.Sprintln("deleted:", st.Deleted) - } - } - spaceReclaimed = report.SpaceReclaimed - } - - return spaceReclaimed, output, nil -} - -// RunPrune calls the Image Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { - return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter}) -} diff --git a/vendor/github.com/docker/cli/cli/command/image/pull.go b/vendor/github.com/docker/cli/cli/command/image/pull.go deleted file mode 100644 index be653ec92..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/pull.go +++ /dev/null @@ -1,80 +0,0 @@ -package image - -import ( - "fmt" - "strings" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/trust" - "github.com/docker/distribution/reference" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type pullOptions struct { - remote string - all bool - platform string -} - -// NewPullCommand creates a new `docker pull` command -func NewPullCommand(dockerCli command.Cli) *cobra.Command { - var opts pullOptions - - cmd := &cobra.Command{ - Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", - Short: "Pull an image or a repository from a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runPull(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") - - command.AddPlatformFlag(flags, &opts.platform) - command.AddTrustVerificationFlags(flags) - - return cmd -} - -func runPull(cli command.Cli, opts pullOptions) error { - distributionRef, err := reference.ParseNormalizedNamed(opts.remote) - switch { - case err != nil: - return err - case opts.all && !reference.IsNameOnly(distributionRef): - return errors.New("tag can't be used with --all-tags/-a") - case !opts.all && reference.IsNameOnly(distributionRef): - distributionRef = reference.TagNameOnly(distributionRef) - if tagged, ok := distributionRef.(reference.Tagged); ok { - fmt.Fprintf(cli.Out(), "Using default tag: %s\n", tagged.Tag()) - } - } - - ctx := context.Background() - imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), distributionRef.String()) - if err != nil { - return err - } - - // Check if reference has a digest - _, isCanonical := distributionRef.(reference.Canonical) - if command.IsTrusted() && !isCanonical { - err = trustedPull(ctx, cli, imgRefAndAuth, opts.platform) - } else { - err = imagePullPrivileged(ctx, cli, 
imgRefAndAuth, opts.all, opts.platform) - } - if err != nil { - if strings.Contains(err.Error(), "when fetching 'plugin'") { - return errors.New(err.Error() + " - Use `docker plugin install`") - } - return err - } - return nil -} diff --git a/vendor/github.com/docker/cli/cli/command/image/push.go b/vendor/github.com/docker/cli/cli/command/image/push.go deleted file mode 100644 index 741ef67c5..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/push.go +++ /dev/null @@ -1,61 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/distribution/reference" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -// NewPushCommand creates a new `docker push` command -func NewPushCommand(dockerCli command.Cli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push [OPTIONS] NAME[:TAG]", - Short: "Push an image or a repository to a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - - flags := cmd.Flags() - - command.AddTrustSigningFlags(flags) - - return cmd -} - -func runPush(dockerCli command.Cli, remote string) error { - ref, err := reference.ParseNormalizedNamed(remote) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - ctx := context.Background() - - // Resolve the Auth config relevant for this server - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") - - if command.IsTrusted() { - return TrustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) - } - - responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git a/vendor/github.com/docker/cli/cli/command/image/remove.go b/vendor/github.com/docker/cli/cli/command/image/remove.go deleted file mode 100644 index 068713490..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/remove.go +++ /dev/null @@ -1,87 +0,0 @@ -package image - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/docker/api/types" - apiclient "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool - noPrune bool -} - -// NewRemoveCommand creates a new `docker remove` command -func NewRemoveCommand(dockerCli command.Cli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rmi [OPTIONS] IMAGE [IMAGE...]", - Short: "Remove one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, opts, args) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") - flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") - - return cmd -} - -func newRemoveCommand(dockerCli command.Cli) *cobra.Command { - cmd := *NewRemoveCommand(dockerCli) - cmd.Aliases = []string{"rmi", "remove"} - cmd.Use = "rm 
[OPTIONS] IMAGE [IMAGE...]" - return &cmd -} - -func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error { - client := dockerCli.Client() - ctx := context.Background() - - options := types.ImageRemoveOptions{ - Force: opts.force, - PruneChildren: !opts.noPrune, - } - - var errs []string - var fatalErr = false - for _, img := range images { - dels, err := client.ImageRemove(ctx, img, options) - if err != nil { - if !apiclient.IsErrNotFound(err) { - fatalErr = true - } - errs = append(errs, err.Error()) - } else { - for _, del := range dels { - if del.Deleted != "" { - fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) - } else { - fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) - } - } - } - } - - if len(errs) > 0 { - msg := strings.Join(errs, "\n") - if !opts.force || fatalErr { - return errors.New(msg) - } - fmt.Fprintf(dockerCli.Err(), msg) - } - return nil -} diff --git a/vendor/github.com/docker/cli/cli/command/image/save.go b/vendor/github.com/docker/cli/cli/command/image/save.go deleted file mode 100644 index 9cae97b37..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/save.go +++ /dev/null @@ -1,72 +0,0 @@ -package image - -import ( - "io" - "os" - "path/filepath" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type saveOptions struct { - images []string - output string -} - -// NewSaveCommand creates a new `docker save` command -func NewSaveCommand(dockerCli command.Cli) *cobra.Command { - var opts saveOptions - - cmd := &cobra.Command{ - Use: "save [OPTIONS] IMAGE [IMAGE...]", - Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.images = args - return runSave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runSave(dockerCli command.Cli, opts saveOptions) error { - if opts.output == "" && dockerCli.Out().IsTerminal() { - return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect") - } - - if err := validateOutputPath(opts.output); err != nil { - return errors.Wrap(err, "failed to save image") - } - - responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return command.CopyToFile(opts.output, responseBody) -} - -func validateOutputPath(path string) error { - dir := filepath.Dir(path) - if dir != "" && dir != "." 
{ - if _, err := os.Stat(dir); os.IsNotExist(err) { - return errors.Errorf("unable to validate output path: directory %q does not exist", dir) - } - } - return nil -} diff --git a/vendor/github.com/docker/cli/cli/command/image/tag.go b/vendor/github.com/docker/cli/cli/command/image/tag.go deleted file mode 100644 index 2a50c127c..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/tag.go +++ /dev/null @@ -1,41 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/spf13/cobra" -) - -type tagOptions struct { - image string - name string -} - -// NewTagCommand creates a new `docker tag` command -func NewTagCommand(dockerCli command.Cli) *cobra.Command { - var opts tagOptions - - cmd := &cobra.Command{ - Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", - Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - opts.name = args[1] - return runTag(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - return cmd -} - -func runTag(dockerCli command.Cli, opts tagOptions) error { - ctx := context.Background() - - return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) -} diff --git a/vendor/github.com/docker/cli/cli/command/image/trust.go b/vendor/github.com/docker/cli/cli/command/image/trust.go deleted file mode 100644 index 7a70ef96c..000000000 --- a/vendor/github.com/docker/cli/cli/command/image/trust.go +++ /dev/null @@ -1,363 +0,0 @@ -package image - -import ( - "encoding/hex" - "encoding/json" - "fmt" - "io" - "sort" - - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/trust" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/registry" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/theupdateframework/notary/client" - "github.com/theupdateframework/notary/tuf/data" - "golang.org/x/net/context" -) - -type target struct { - name string - digest digest.Digest - size int64 -} - -// TrustedPush handles content trust pushing of an image -func TrustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) -} - -// PushTrustedReference pushes a canonical reference to the trust server. -// nolint: gocyclo -func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { - // If it is a trusted push we would like to find the target entry which matches the - // tag provided in the function and then do an AddTarget later. - target := &client.Target{} - // Count the times of calling for handleTarget; - // if it is called more than once, that should be considered an error in a trusted push. - cnt := 0 - handleTarget := func(aux *json.RawMessage) { - cnt++ - if cnt > 1 { - // handleTarget should only be called once. This will be treated as an error. 
- return - } - - var pushResult types.PushResult - err := json.Unmarshal(*aux, &pushResult) - if err == nil && pushResult.Tag != "" { - if dgst, err := digest.Parse(pushResult.Digest); err == nil { - h, err := hex.DecodeString(dgst.Hex()) - if err != nil { - target = nil - return - } - target.Name = pushResult.Tag - target.Hashes = data.Hashes{string(dgst.Algorithm()): h} - target.Length = int64(pushResult.Size) - } - } - } - - var tag string - switch x := ref.(type) { - case reference.Canonical: - return errors.New("cannot push a digest reference") - case reference.NamedTagged: - tag = x.Tag() - default: - // We want trust signatures to always take an explicit tag, - // otherwise it will act as an untrusted push. - if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil { - return err - } - fmt.Fprintln(streams.Err(), "No tag specified, skipping trust metadata push") - return nil - } - - if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil { - return err - } - - if cnt > 1 { - return errors.Errorf("internal error: only one call to handleTarget expected") - } - - if target == nil { - return errors.Errorf("no targets found, please provide a specific tag in order to sign it") - } - - fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata") - - repo, err := trust.GetNotaryRepository(streams.In(), streams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull") - if err != nil { - return errors.Wrap(err, "error establishing connection to trust repository") - } - - // get the latest repository metadata so we can figure out which roles to sign - _, err = repo.ListTargets() - - switch err.(type) { - case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: - keys := repo.GetCryptoService().ListKeys(data.CanonicalRootRole) - var rootKeyID string - // always select the first root key - if len(keys) > 0 { - sort.Strings(keys) - rootKeyID = keys[0] - } else { - rootPublicKey, err := repo.GetCryptoService().Create(data.CanonicalRootRole, "", data.ECDSAKey) - if err != nil { - return err - } - rootKeyID = rootPublicKey.ID() - } - - // Initialize the notary repository with a remotely managed snapshot key - if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { - return trust.NotaryError(repoInfo.Name.Name(), err) - } - fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name()) - err = repo.AddTarget(target, data.CanonicalTargetsRole) - case nil: - // already initialized and we have successfully downloaded the latest metadata - err = AddTargetToAllSignableRoles(repo, target) - default: - return trust.NotaryError(repoInfo.Name.Name(), err) - } - - if err == nil { - err = repo.Publish() - } - - if err != nil { - err = errors.Wrapf(err, "failed to sign %s:%s", repoInfo.Name.Name(), tag) - return trust.NotaryError(repoInfo.Name.Name(), err) - } - - fmt.Fprintf(streams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag) - return nil -} - -// AddTargetToAllSignableRoles attempts to add the image target to all the top level delegation roles we can -// (based on whether we have the signing key and whether the role's path allows -// us to). -// If there are no delegation roles, we add to the targets role. -func AddTargetToAllSignableRoles(repo client.Repository, target *client.Target) error { - signableRoles, err := trust.GetSignableRoles(repo, target) - if err != nil { - return err - } - - return repo.AddTarget(target, signableRoles...) 
-} - -// imagePushPrivileged pushes the image -func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref reference.Reference, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return nil, err - } - options := types.ImagePushOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - } - - return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options) -} - -// trustedPull handles content trust pulling of an image -func trustedPull(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, platform string) error { - refs, err := getTrustedPullTargets(cli, imgRefAndAuth) - if err != nil { - return err - } - - ref := imgRefAndAuth.Reference() - for i, r := range refs { - displayTag := r.name - if displayTag != "" { - displayTag = ":" + displayTag - } - fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest) - - trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest) - if err != nil { - return err - } - updatedImgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), trustedRef.String()) - if err != nil { - return err - } - if err := imagePullPrivileged(ctx, cli, updatedImgRefAndAuth, false, platform); err != nil { - return err - } - - tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name) - if err != nil { - return err - } - - if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { - return err - } - } - return nil -} - -func getTrustedPullTargets(cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth) ([]target, error) { - notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly) - if err != nil { - return nil, errors.Wrap(err, "error establishing connection to trust repository") - } - - ref := imgRefAndAuth.Reference() - tagged, isTagged := ref.(reference.NamedTagged) - if !isTagged { - // List all targets - targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, trust.NotaryError(ref.Name(), err) - } - var refs []target - for _, tgt := range targets { - t, err := convertTarget(tgt.Target) - if err != nil { - fmt.Fprintf(cli.Err(), "Skipping target for %q\n", reference.FamiliarName(ref)) - continue - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { - continue - } - refs = append(refs, t) - } - if len(refs) == 0 { - return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name())) - } - return refs, nil - } - - t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, trust.NotaryError(ref.Name(), err) - } - // Only get the tag if it's in the top level targets role or the releases delegation role - // ignore it if it's in any other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { - return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag())) - } - - logrus.Debugf("retrieving target for %s role", t.Role) - r, err := convertTarget(t.Target) - return []target{r}, err -} - -// imagePullPrivileged pulls the image and displays it to the output -func imagePullPrivileged(ctx context.Context, cli 
command.Cli, imgRefAndAuth trust.ImageRefAndAuth, all bool, platform string) error { - ref := reference.FamiliarString(imgRefAndAuth.Reference()) - - encodedAuth, err := command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig()) - if err != nil { - return err - } - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "pull") - options := types.ImagePullOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - All: all, - Platform: platform, - } - responseBody, err := cli.Client().ImagePull(ctx, ref, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) -} - -// TrustedReference returns the canonical trusted reference for an image reference -func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { - var ( - repoInfo *registry.RepositoryInfo - err error - ) - if rs != nil { - repoInfo, err = rs.ResolveRepository(ref) - } else { - repoInfo, err = registry.ParseRepositoryInfo(ref) - } - if err != nil { - return nil, err - } - - // Resolve the Auth config relevant for this server - authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) - - notaryRepo, err := trust.GetNotaryRepository(cli.In(), cli.Out(), command.UserAgent(), repoInfo, &authConfig, "pull") - if err != nil { - return nil, errors.Wrap(err, "error establishing connection to trust repository") - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, trust.NotaryError(repoInfo.Name.Name(), err) - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { - return nil, trust.NotaryError(repoInfo.Name.Name(), client.ErrNoSuchTarget(ref.Tag())) - } - r, err := convertTarget(t.Target) - if err != nil { - return nil, err - - } - return reference.WithDigest(reference.TrimNamed(ref), r.digest) -} - -func convertTarget(t client.Target) (target, error) { - h, ok := t.Hashes["sha256"] - if !ok { - return target{}, errors.New("no valid hash, expecting sha256") - } - return target{ - name: t.Name, - digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), - size: t.Length, - }, nil -} - -// TagTrusted tags a trusted ref -// nolint: interfacer -func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canonical, ref reference.NamedTagged) error { - // Use familiar references when interacting with client and output - familiarRef := reference.FamiliarString(ref) - trustedFamiliarRef := reference.FamiliarString(trustedRef) - - fmt.Fprintf(cli.Err(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef) - - return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef) -} - -// AuthResolver returns an auth resolver function from a command.Cli -func AuthResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { - return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { - return command.ResolveAuthConfig(ctx, cli, index) - } -} diff --git a/vendor/github.com/docker/cli/cli/command/in.go b/vendor/github.com/docker/cli/cli/command/in.go deleted file mode 100644 index 54855c6dc..000000000 --- a/vendor/github.com/docker/cli/cli/command/in.go +++ /dev/null @@ -1,56 +0,0 @@ -package command - 
-import ( - "errors" - "io" - "os" - "runtime" - - "github.com/docker/docker/pkg/term" -) - -// InStream is an input stream used by the DockerCli to read user input -type InStream struct { - CommonStream - in io.ReadCloser -} - -func (i *InStream) Read(p []byte) (int, error) { - return i.in.Read(p) -} - -// Close implements the Closer interface -func (i *InStream) Close() error { - return i.in.Close() -} - -// SetRawTerminal sets raw mode on the input terminal -func (i *InStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !i.CommonStream.isTerminal { - return nil - } - i.CommonStream.state, err = term.SetRawTerminal(i.CommonStream.fd) - return err -} - -// CheckTty checks if we are trying to attach to a container tty -// from a non-tty client input stream, and if so, returns an error. -func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { - // In order to attach to a container tty, input stream for the client must - // be a tty itself: redirecting or piping the client standard input is - // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. - if ttyMode && attachStdin && !i.isTerminal { - eText := "the input device is not a TTY" - if runtime.GOOS == "windows" { - return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") - } - return errors.New(eText) - } - return nil -} - -// NewInStream returns a new InStream object from a ReadCloser -func NewInStream(in io.ReadCloser) *InStream { - fd, isTerminal := term.GetFdInfo(in) - return &InStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, in: in} -} diff --git a/vendor/github.com/docker/cli/cli/command/out.go b/vendor/github.com/docker/cli/cli/command/out.go deleted file mode 100644 index 89cc5d3aa..000000000 --- a/vendor/github.com/docker/cli/cli/command/out.go +++ /dev/null @@ -1,50 +0,0 @@ -package command - -import ( - "io" - "os" - - "github.com/docker/docker/pkg/term" - "github.com/sirupsen/logrus" -) - -// OutStream is an output stream used by the DockerCli to write normal program -// output. 
-type OutStream struct { - CommonStream - out io.Writer -} - -func (o *OutStream) Write(p []byte) (int, error) { - return o.out.Write(p) -} - -// SetRawTerminal sets raw mode on the input terminal -func (o *OutStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !o.CommonStream.isTerminal { - return nil - } - o.CommonStream.state, err = term.SetRawTerminalOutput(o.CommonStream.fd) - return err -} - -// GetTtySize returns the height and width in characters of the tty -func (o *OutStream) GetTtySize() (uint, uint) { - if !o.isTerminal { - return 0, 0 - } - ws, err := term.GetWinsize(o.fd) - if err != nil { - logrus.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return uint(ws.Height), uint(ws.Width) -} - -// NewOutStream returns a new OutStream object from a Writer -func NewOutStream(out io.Writer) *OutStream { - fd, isTerminal := term.GetFdInfo(out) - return &OutStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, out: out} -} diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go deleted file mode 100644 index f3958d8a4..000000000 --- a/vendor/github.com/docker/cli/cli/command/registry.go +++ /dev/null @@ -1,189 +0,0 @@ -package command - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "os" - "runtime" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" - "github.com/pkg/errors" -) - -// ElectAuthServer returns the default registry to use (by asking the daemon) -func ElectAuthServer(ctx context.Context, cli Cli) string { - // The daemon `/info` endpoint informs us of the default registry being - // used. This is essential in cross-platform environments, where for - // example a Linux client might be interacting with a Windows daemon, hence - // the default registry URL might be Windows specific. - serverAddress := registry.IndexServer - if info, err := cli.Client().Info(ctx); err != nil { - fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) - } else if info.IndexServerAddress == "" { - fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress) - } else { - serverAddress = info.IndexServerAddress - } - return serverAddress -} - -// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload -func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", err - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info -// for the given command. 
-func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { - return func() (string, error) { - fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) - indexServer := registry.GetAuthConfigKey(index) - isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) - authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) - if err != nil { - return "", err - } - return EncodeAuthToBase64(authConfig) - } -} - -// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the -// default index, it uses the default index name for the daemon's platform, -// not the client's platform. -func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := index.Name - if index.Official { - configKey = ElectAuthServer(ctx, cli) - } - - a, _ := cli.ConfigFile().GetAuthConfig(configKey) - return a -} - -// ConfigureAuth returns an AuthConfig from the specified user, password and server. -func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { - // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 - if runtime.GOOS == "windows" { - cli.SetIn(NewInStream(os.Stdin)) - } - - if !isDefaultRegistry { - serverAddress = registry.ConvertToHostname(serverAddress) - } - - authconfig, err := cli.ConfigFile().GetAuthConfig(serverAddress) - if err != nil { - return authconfig, err - } - - // Some links documenting this: - // - https://code.google.com/archive/p/mintty/issues/56 - // - https://github.com/docker/docker/issues/15272 - // - https://mintty.github.io/ (compatibility) - // Linux will hit this if you attempt `cat | docker login`, and Windows - // will hit this if you attempt docker login from mintty where stdin - // is a pipe, not a character based console. - if flPassword == "" && !cli.In().IsTerminal() { - return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") - } - - authconfig.Username = strings.TrimSpace(authconfig.Username) - - if flUser = strings.TrimSpace(flUser); flUser == "" { - if isDefaultRegistry { - // if this is a default registry (docker hub), then display the following message. - fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") - } - promptWithDefault(cli.Out(), "Username", authconfig.Username) - flUser = readInput(cli.In(), cli.Out()) - flUser = strings.TrimSpace(flUser) - if flUser == "" { - flUser = authconfig.Username - } - } - if flUser == "" { - return authconfig, errors.Errorf("Error: Non-null Username Required") - } - if flPassword == "" { - oldState, err := term.SaveState(cli.In().FD()) - if err != nil { - return authconfig, err - } - fmt.Fprintf(cli.Out(), "Password: ") - term.DisableEcho(cli.In().FD(), oldState) - - flPassword = readInput(cli.In(), cli.Out()) - fmt.Fprint(cli.Out(), "\n") - - term.RestoreTerminal(cli.In().FD(), oldState) - if flPassword == "" { - return authconfig, errors.Errorf("Error: Password Required") - } - } - - authconfig.Username = flUser - authconfig.Password = flPassword - authconfig.ServerAddress = serverAddress - authconfig.IdentityToken = "" - - return authconfig, nil -} - -func readInput(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) -} - -func promptWithDefault(out io.Writer, prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(out, "%s: ", prompt) - } else { - fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) - } -} - -// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image -func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) { - // Retrieve encoded auth token from the image reference - authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) - if err != nil { - return "", err - } - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return "", err - } - return encodedAuth, nil -} - -// resolveAuthConfigFromImage retrieves the AuthConfig using the image string -func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) { - registryRef, err := reference.ParseNormalizedNamed(image) - if err != nil { - return types.AuthConfig{}, err - } - repoInfo, err := registry.ParseRepositoryInfo(registryRef) - if err != nil { - return types.AuthConfig{}, err - } - return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil -} diff --git a/vendor/github.com/docker/cli/cli/command/stream.go b/vendor/github.com/docker/cli/cli/command/stream.go deleted file mode 100644 index 71a43fa2e..000000000 --- a/vendor/github.com/docker/cli/cli/command/stream.go +++ /dev/null @@ -1,34 +0,0 @@ -package command - -import ( - "github.com/docker/docker/pkg/term" -) - -// CommonStream is an input stream used by the DockerCli to read user input -type CommonStream struct { - fd uintptr - isTerminal bool - state *term.State -} - -// FD returns the file descriptor number for this stream -func (s *CommonStream) FD() uintptr { - return s.fd -} - -// IsTerminal returns true if this stream is connected to a terminal -func (s *CommonStream) IsTerminal() bool { - return s.isTerminal -} - -// RestoreTerminal restores normal mode to the terminal -func (s *CommonStream) RestoreTerminal() { - if s.state != nil { - term.RestoreTerminal(s.fd, s.state) - } -} - -// SetIsTerminal sets the boolean used for isTerminal -func (s *CommonStream) SetIsTerminal(isTerminal bool) { - s.isTerminal = isTerminal -} diff --git a/vendor/github.com/docker/cli/cli/command/trust.go b/vendor/github.com/docker/cli/cli/command/trust.go deleted file mode 
100644 index 51f760ac5..000000000 --- a/vendor/github.com/docker/cli/cli/command/trust.go +++ /dev/null @@ -1,47 +0,0 @@ -package command - -import ( - "os" - "strconv" - - "github.com/spf13/pflag" -) - -var ( - // TODO: make this not global - untrusted bool -) - -func init() { - untrusted = !getDefaultTrustState() -} - -// AddTrustVerificationFlags adds content trust flags to the provided flagset -func AddTrustVerificationFlags(fs *pflag.FlagSet) { - trusted := getDefaultTrustState() - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image verification") -} - -// AddTrustSigningFlags adds "signing" flags to the provided flagset -func AddTrustSigningFlags(fs *pflag.FlagSet) { - trusted := getDefaultTrustState() - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image signing") -} - -// getDefaultTrustState returns true if content trust is enabled through the $DOCKER_CONTENT_TRUST environment variable. -func getDefaultTrustState() bool { - var trusted bool - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - trusted = true - } - } - return trusted -} - -// IsTrusted returns true if content trust is enabled, either through the $DOCKER_CONTENT_TRUST environment variable, -// or through `--disable-content-trust=false` on a command. -func IsTrusted() bool { - return !untrusted -} diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go deleted file mode 100644 index dc543e7dc..000000000 --- a/vendor/github.com/docker/cli/cli/command/utils.go +++ /dev/null @@ -1,127 +0,0 @@ -package command - -import ( - "bufio" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/pkg/system" - "github.com/spf13/pflag" -) - -// CopyToFile writes the content of the reader to the specified file -func CopyToFile(outfile string, r io.Reader) error { - // We use sequential file access here to avoid depleting the standby list - // on Windows. On Linux, this is a call directly to ioutil.TempFile - tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - - _, err = io.Copy(tmpFile, r) - tmpFile.Close() - - if err != nil { - os.Remove(tmpPath) - return err - } - - if err = os.Rename(tmpPath, outfile); err != nil { - os.Remove(tmpPath) - return err - } - - return nil -} - -// capitalizeFirst capitalizes the first character of a string -func capitalizeFirst(s string) string { - switch l := len(s); l { - case 0: - return s - case 1: - return strings.ToLower(s) - default: - return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) - } -} - -// PrettyPrint outputs arbitrary data for human-formatted output by uppercasing the first letter. -func PrettyPrint(i interface{}) string { - switch t := i.(type) { - case nil: - return "None" - case string: - return capitalizeFirst(t) - default: - return capitalizeFirst(fmt.Sprintf("%s", t)) - } -} - -// PromptForConfirmation requests and checks confirmation from user. -// This will display the provided message followed by ' [y/N] '. If -// the user inputs 'y' or 'Y' it returns true, otherwise false. If no -// message is provided, "Are you sure you want to proceed? [y/N] " -// will be used instead.
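The `DOCKER_CONTENT_TRUST` parsing rule that the deleted `getDefaultTrustState` implements is worth spelling out: an empty variable means disabled, and any non-empty value that fails `strconv.ParseBool` is still treated as enabled. A minimal standalone sketch of the same rule (the `trustEnabled` name is ours, purely illustrative):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// trustEnabled mirrors the deleted getDefaultTrustState: empty means
// disabled; unparseable non-empty values err on the side of enabling trust.
func trustEnabled() bool {
	e := os.Getenv("DOCKER_CONTENT_TRUST")
	if e == "" {
		return false
	}
	t, err := strconv.ParseBool(e)
	return t || err != nil // any other value is treated as true
}

func main() {
	os.Setenv("DOCKER_CONTENT_TRUST", "definitely") // not a valid bool
	fmt.Println(trustEnabled())                     // true
}
```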
-func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool { - if message == "" { - message = "Are you sure you want to proceed?" - } - message += " [y/N] " - - fmt.Fprintf(outs, message) - - // On Windows, force the use of the regular OS stdin stream. - if runtime.GOOS == "windows" { - ins = NewInStream(os.Stdin) - } - - reader := bufio.NewReader(ins) - answer, _, _ := reader.ReadLine() - return strings.ToLower(string(answer)) == "y" -} - -// PruneFilters returns consolidated prune filters obtained from config.json and the CLI -func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { - if dockerCli.ConfigFile() == nil { - return pruneFilters - } - for _, f := range dockerCli.ConfigFile().PruneFilters { - parts := strings.SplitN(f, "=", 2) - if len(parts) != 2 { - continue - } - if parts[0] == "label" { - // The CLI label filter supersedes config.json. - // If the CLI label filter conflicts with config.json, - // skip adding the label! filter from config.json. - if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) { - continue - } - } else if parts[0] == "label!" { - // The CLI label! filter supersedes config.json. - // If the CLI label! filter conflicts with config.json, - // skip adding the label filter from config.json. - if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) { - continue - } - } - pruneFilters.Add(parts[0], parts[1]) - } - - return pruneFilters -} - -// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later. -func AddPlatformFlag(flags *pflag.FlagSet, target *string) { - flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable") - flags.SetAnnotation("platform", "version", []string{"1.32"}) - flags.SetAnnotation("platform", "experimental", nil) -} diff --git a/vendor/github.com/docker/cli/cli/error.go b/vendor/github.com/docker/cli/cli/error.go deleted file mode 100644 index 62f62433b..000000000 --- a/vendor/github.com/docker/cli/cli/error.go +++ /dev/null @@ -1,33 +0,0 @@ -package cli - -import ( - "fmt" - "strings" -) - -// Errors is a list of errors. -// Useful in a loop if you don't want to return the error right away and instead -// want to display, after the loop, all the errors that happened during it. -type Errors []error - -func (errList Errors) Error() string { - if len(errList) < 1 { - return "" - } - - out := make([]string, len(errList)) - for i := range errList { - out[i] = errList[i].Error() - } - return strings.Join(out, ", ") -} - -// StatusError reports an unsuccessful exit by a command.
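The `Errors` type above exists for exactly the loop-then-report pattern its comment describes. A small usage sketch (the failing loop body is invented for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// Errors collects failures from a loop so they can all be reported at once,
// as the deleted cli.Errors type does.
type Errors []error

func (errList Errors) Error() string {
	out := make([]string, len(errList))
	for i := range errList {
		out[i] = errList[i].Error()
	}
	return strings.Join(out, ", ")
}

func main() {
	var errs Errors
	for _, name := range []string{"alpha", "beta"} {
		// Pretend each iteration fails; keep going instead of returning.
		errs = append(errs, fmt.Errorf("processing %s failed", name))
	}
	if len(errs) > 0 {
		fmt.Println(errs.Error()) // processing alpha failed, processing beta failed
	}
}
```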
-type StatusError struct { - Status string - StatusCode int -} - -func (e StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} diff --git a/vendor/github.com/docker/cli/cli/required.go b/vendor/github.com/docker/cli/cli/required.go deleted file mode 100644 index 33a467354..000000000 --- a/vendor/github.com/docker/cli/cli/required.go +++ /dev/null @@ -1,107 +0,0 @@ -package cli - -import ( - "strings" - - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -// NoArgs validates args and returns an error if there are any args -func NoArgs(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return nil - } - - if cmd.HasSubCommands() { - return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) - } - - return errors.Errorf( - "%q accepts no arguments.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) -} - -// RequiresMinArgs returns an error if there are not at least min args -func RequiresMinArgs(min int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min { - return nil - } - return errors.Errorf( - "%q requires at least %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - pluralize("argument", min), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresMaxArgs returns an error if there are not at most max args -func RequiresMaxArgs(max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) <= max { - return nil - } - return errors.Errorf( - "%q requires at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - max, - pluralize("argument", max), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresRangeArgs returns an error if there are not at least min and at most max args -func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min && len(args) <= max { - return nil - } - return errors.Errorf( - "%q requires at least %d and at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - max, - pluralize("argument", max), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// ExactArgs returns an error if there is not exactly the given number of args -func ExactArgs(number int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) == number { - return nil - } - return errors.Errorf( - "%q requires exactly %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - number, - pluralize("argument", number), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -func pluralize(word string, number int) string { - if number == 1 { - return word - } - return word + "s" -} diff --git a/vendor/github.com/docker/cli/cli/version.go b/vendor/github.com/docker/cli/cli/version.go deleted file mode 100644 index bff7ab49e..000000000 --- a/vendor/github.com/docker/cli/cli/version.go +++ /dev/null @@ -1,9 +0,0 @@ -package cli - -// Default build-time variables.
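These validators are meant to be assigned to a cobra command's `Args` field, where cobra runs them before the command body. A compact usage sketch (simplified error text, ours, not the vendored wording):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// requiresMinArgs mirrors the deleted RequiresMinArgs helper: it builds a
// validator rejecting fewer than min positional arguments.
func requiresMinArgs(min int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) >= min {
			return nil
		}
		return fmt.Errorf("%q requires at least %d argument(s)", cmd.CommandPath(), min)
	}
}

func main() {
	cmd := &cobra.Command{
		Use:  "greet NAME",
		Args: requiresMinArgs(1), // validated before RunE is called
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("hello,", args[0])
			return nil
		},
	}
	cmd.SetArgs(nil) // no NAME given: Execute returns the validation error
	if err := cmd.Execute(); err != nil {
		fmt.Println("got:", err)
	}
}
```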
-// These values are overridden via ldflags -var ( - Version = "unknown-version" - GitCommit = "unknown-commit" - BuildTime = "unknown-buildtime" -) diff --git a/vendor/github.com/docker/distribution/registry/doc.go b/vendor/github.com/docker/distribution/registry/doc.go deleted file mode 100644 index a1ba7f3ab..000000000 --- a/vendor/github.com/docker/distribution/registry/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package registry provides the main entrypoints for running a registry. -package registry diff --git a/vendor/github.com/docker/distribution/registry/registry.go b/vendor/github.com/docker/distribution/registry/registry.go deleted file mode 100644 index fbb4dd879..000000000 --- a/vendor/github.com/docker/distribution/registry/registry.go +++ /dev/null @@ -1,356 +0,0 @@ -package registry - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "os" - "time" - - "rsc.io/letsencrypt" - - logstash "github.com/bshuster-repo/logrus-logstash-hook" - "github.com/bugsnag/bugsnag-go" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/health" - "github.com/docker/distribution/registry/handlers" - "github.com/docker/distribution/registry/listener" - "github.com/docker/distribution/uuid" - "github.com/docker/distribution/version" - gorhandlers "github.com/gorilla/handlers" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/yvasiyarov/gorelic" -) - -// ServeCmd is a cobra command for running the registry. -var ServeCmd = &cobra.Command{ - Use: "serve ", - Short: "`serve` stores and distributes Docker images", - Long: "`serve` stores and distributes Docker images.", - Run: func(cmd *cobra.Command, args []string) { - - // set up context - ctx := context.WithVersion(context.Background(), version.Version) - - config, err := resolveConfiguration(args) - if err != nil { - fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) - cmd.Usage() - os.Exit(1) - } - - if config.HTTP.Debug.Addr != "" { - go func(addr string) { - log.Infof("debug server listening %v", addr) - if err := http.ListenAndServe(addr, nil); err != nil { - log.Fatalf("error listening on debug interface: %v", err) - } - }(config.HTTP.Debug.Addr) - } - - registry, err := NewRegistry(ctx, config) - if err != nil { - log.Fatalln(err) - } - - if err = registry.ListenAndServe(); err != nil { - log.Fatalln(err) - } - }, -} - -// A Registry represents a complete instance of the registry. -// TODO(aaronl): It might make sense for Registry to become an interface. -type Registry struct { - config *configuration.Configuration - app *handlers.App - server *http.Server -} - -// NewRegistry creates a new registry from a context and configuration struct. -func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { - var err error - ctx, err = configureLogging(ctx, config) - if err != nil { - return nil, fmt.Errorf("error configuring logger: %v", err) - } - - // Inject a logger into the uuid library. It warns us if there is a problem - // with uuid generation under low entropy. - uuid.Loggerf = context.GetLogger(ctx).Warnf - - app := handlers.NewApp(ctx, config) - // TODO(aaronl): The global scope of the health checks means NewRegistry - // can only be called once per process.
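One detail of `ServeCmd` deserves a note: the debug endpoint is served from a side goroutine so it can never block the main listener. A runnable sketch of the same shape (addresses are placeholders; for brevity both listeners share `http.DefaultServeMux`, which the real code avoids by building dedicated handlers):

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers debug handlers on the default mux
)

func main() {
	debugAddr := "127.0.0.1:6060" // stand-in for config.HTTP.Debug.Addr

	// Same shape as the deleted ServeCmd: the debug server runs on its own
	// goroutine, and a failure there kills the process loudly.
	go func(addr string) {
		log.Printf("debug server listening %v", addr)
		if err := http.ListenAndServe(addr, nil); err != nil {
			log.Fatalf("error listening on debug interface: %v", err)
		}
	}(debugAddr)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("main app\n"))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```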
- app.RegisterHealthChecks() - handler := configureReporting(app) - handler = alive("/", handler) - handler = health.Handler(handler) - handler = panicHandler(handler) - if !config.Log.AccessLog.Disabled { - handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) - } - - server := &http.Server{ - Handler: handler, - } - - return &Registry{ - app: app, - config: config, - server: server, - }, nil -} - -// ListenAndServe runs the registry's HTTP server. -func (registry *Registry) ListenAndServe() error { - config := registry.config - - ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) - if err != nil { - return err - } - - if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { - tlsConf := &tls.Config{ - ClientAuth: tls.NoClientCert, - NextProtos: nextProtos(config), - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - }, - } - - if config.HTTP.TLS.LetsEncrypt.CacheFile != "" { - if config.HTTP.TLS.Certificate != "" { - return fmt.Errorf("cannot specify both certificate and Let's Encrypt") - } - var m letsencrypt.Manager - if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { - return err - } - if !m.Registered() { - if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { - return err - } - } - tlsConf.GetCertificate = m.GetCertificate - } else { - tlsConf.Certificates = make([]tls.Certificate, 1) - tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) - if err != nil { - return err - } - } - - if len(config.HTTP.TLS.ClientCAs) != 0 { - pool := x509.NewCertPool() - - for _, ca := range config.HTTP.TLS.ClientCAs { - caPem, err := ioutil.ReadFile(ca) - if err != nil { - return err - } - - if ok := pool.AppendCertsFromPEM(caPem); !ok { - return fmt.Errorf("Could not add CA to pool") - } - } - - for _, subj := range pool.Subjects() { - context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) - } - - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = pool - } - - ln = tls.NewListener(ln, tlsConf) - context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) - } else { - context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) - } - - return registry.server.Serve(ln) -} - -func configureReporting(app *handlers.App) http.Handler { - var handler http.Handler = app - - if app.Config.Reporting.Bugsnag.APIKey != "" { - bugsnagConfig := bugsnag.Configuration{ - APIKey: app.Config.Reporting.Bugsnag.APIKey, - // TODO(brianbland): provide the registry version here - // AppVersion: "2.0", - } - if app.Config.Reporting.Bugsnag.ReleaseStage != "" { - bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage - } - if app.Config.Reporting.Bugsnag.Endpoint != "" { - bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint - } - bugsnag.Configure(bugsnagConfig) - - handler = bugsnag.Handler(handler) - } - - if app.Config.Reporting.NewRelic.LicenseKey != "" { - agent := gorelic.NewAgent() - agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey - if app.Config.Reporting.NewRelic.Name != "" { - agent.NewrelicName = 
app.Config.Reporting.NewRelic.Name - } - agent.CollectHTTPStat = true - agent.Verbose = app.Config.Reporting.NewRelic.Verbose - agent.Run() - - handler = agent.WrapHTTPHandler(handler) - } - - return handler -} - -// configureLogging prepares the context with a logger using the -// configuration. -func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { - if config.Log.Level == "" && config.Log.Formatter == "" { - // If no config for logging is set, fall back to the deprecated "Loglevel". - log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx)) - return ctx, nil - } - - log.SetLevel(logLevel(config.Log.Level)) - - formatter := config.Log.Formatter - if formatter == "" { - formatter = "text" // default formatter - } - - switch formatter { - case "json": - log.SetFormatter(&log.JSONFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "text": - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "logstash": - log.SetFormatter(&logstash.LogstashFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - default: - // Just let the library use the default on an empty string. - if config.Log.Formatter != "" { - return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) - } - } - - if config.Log.Formatter != "" { - log.Debugf("using %q logging formatter", config.Log.Formatter) - } - - if len(config.Log.Fields) > 0 { - // build up the static fields, if present. - var fields []interface{} - for k := range config.Log.Fields { - fields = append(fields, k) - } - - ctx = context.WithValues(ctx, config.Log.Fields) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) - } - - return ctx, nil -} - -func logLevel(level configuration.Loglevel) log.Level { - l, err := log.ParseLevel(string(level)) - if err != nil { - l = log.InfoLevel - log.Warnf("error parsing level %q: %v, using %q ", level, err, l) - } - - return l -} - -// panicHandler adds an HTTP handler to the web app. The handler recovers from any -// panic. logrus.Panic transmits the panic message to pre-configured log hooks, which are -// defined in config.yml. -func panicHandler(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer func() { - if err := recover(); err != nil { - log.Panic(fmt.Sprintf("%v", err)) - } - }() - handler.ServeHTTP(w, r) - }) -} - -// alive simply wraps the handler with a route that always returns an http 200 -// response when the path is matched. If the path is not matched, the request -// is passed to the provided handler. There is no guarantee of anything but -// that the server is up. Wrap with other handlers (such as health.Handler) -// for greater effect.
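The formatter switch in `configureLogging` is a pattern that transfers directly to any logrus-based service; a trimmed sketch under the same fallbacks (the `logstash` case is omitted because it needs the extra hook package):

```go
package main

import (
	"fmt"
	"time"

	log "github.com/sirupsen/logrus"
)

// configure picks a logrus level and formatter by name, mirroring the
// deleted configureLogging/logLevel pair: unknown levels fall back to Info,
// an empty formatter means "text", and unknown formatters are an error.
func configure(level, formatter string) error {
	l, err := log.ParseLevel(level)
	if err != nil {
		l = log.InfoLevel
	}
	log.SetLevel(l)

	switch formatter {
	case "json":
		log.SetFormatter(&log.JSONFormatter{TimestampFormat: time.RFC3339Nano})
	case "", "text":
		log.SetFormatter(&log.TextFormatter{TimestampFormat: time.RFC3339Nano})
	default:
		return fmt.Errorf("unsupported logging formatter: %q", formatter)
	}
	return nil
}

func main() {
	if err := configure("debug", "json"); err != nil {
		panic(err)
	}
	log.WithField("formatter", "json").Debug("logging configured")
}
```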
-func alive(path string, handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == path { - w.Header().Set("Cache-Control", "no-cache") - w.WriteHeader(http.StatusOK) - return - } - - handler.ServeHTTP(w, r) - }) -} - -func resolveConfiguration(args []string) (*configuration.Configuration, error) { - var configurationPath string - - if len(args) > 0 { - configurationPath = args[0] - } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { - configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") - } - - if configurationPath == "" { - return nil, fmt.Errorf("configuration path unspecified") - } - - fp, err := os.Open(configurationPath) - if err != nil { - return nil, err - } - - defer fp.Close() - - config, err := configuration.Parse(fp) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) - } - - return config, nil -} - -func nextProtos(config *configuration.Configuration) []string { - switch config.HTTP.HTTP2.Disabled { - case true: - return []string{"http/1.1"} - default: - return []string{"h2", "http/1.1"} - } -} diff --git a/vendor/github.com/docker/distribution/registry/root.go b/vendor/github.com/docker/distribution/registry/root.go deleted file mode 100644 index 5d3005c26..000000000 --- a/vendor/github.com/docker/distribution/registry/root.go +++ /dev/null @@ -1,84 +0,0 @@ -package registry - -import ( - "fmt" - "os" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/version" - "github.com/docker/libtrust" - "github.com/spf13/cobra" -) - -var showVersion bool - -func init() { - RootCmd.AddCommand(ServeCmd) - RootCmd.AddCommand(GCCmd) - GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") - RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") -} - -// RootCmd is the main command for the 'registry' binary. 
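`resolveConfiguration` encodes a small but useful precedence rule: explicit argument first, then environment, then a hard error. The same logic in isolation (function name ours):

```go
package main

import (
	"fmt"
	"os"
)

// resolvePath follows the deleted resolveConfiguration precedence:
// positional argument first, then REGISTRY_CONFIGURATION_PATH, then an error.
func resolvePath(args []string) (string, error) {
	switch {
	case len(args) > 0:
		return args[0], nil
	case os.Getenv("REGISTRY_CONFIGURATION_PATH") != "":
		return os.Getenv("REGISTRY_CONFIGURATION_PATH"), nil
	default:
		return "", fmt.Errorf("configuration path unspecified")
	}
}

func main() {
	path, err := resolvePath(os.Args[1:])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("using configuration:", path)
}
```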
-var RootCmd = &cobra.Command{ - Use: "registry", - Short: "`registry`", - Long: "`registry`", - Run: func(cmd *cobra.Command, args []string) { - if showVersion { - version.PrintVersion() - return - } - cmd.Usage() - }, -} - -var dryRun bool - -// GCCmd is the cobra command that corresponds to the garbage-collect subcommand -var GCCmd = &cobra.Command{ - Use: "garbage-collect ", - Short: "`garbage-collect` deletes layers not referenced by any manifests", - Long: "`garbage-collect` deletes layers not referenced by any manifests", - Run: func(cmd *cobra.Command, args []string) { - config, err := resolveConfiguration(args) - if err != nil { - fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) - cmd.Usage() - os.Exit(1) - } - - driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err) - os.Exit(1) - } - - ctx := context.Background() - ctx, err = configureLogging(ctx, config) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) - os.Exit(1) - } - - k, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - fmt.Fprint(os.Stderr, err) - os.Exit(1) - } - - registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) - os.Exit(1) - } - - err = storage.MarkAndSweep(ctx, driver, registry, dryRun) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) - os.Exit(1) - } - }, -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go deleted file mode 100644 index fad0a77a1..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go +++ /dev/null @@ -1,60 +0,0 @@ -package storage - -import ( - "expvar" - "sync/atomic" - - "github.com/docker/distribution/registry/storage/cache" -) - -type blobStatCollector struct { - metrics cache.Metrics -} - -func (bsc *blobStatCollector) Hit() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Hits, 1) -} - -func (bsc *blobStatCollector) Miss() { - atomic.AddUint64(&bsc.metrics.Requests, 1) - atomic.AddUint64(&bsc.metrics.Misses, 1) -} - -func (bsc *blobStatCollector) Metrics() cache.Metrics { - return bsc.metrics -} - -// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor -// cache requests. Note this is kept globally and made available via expvar. -// For more detailed metrics, it's recommended to instrument a particular cache -// implementation. -var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly.
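The metrics pattern used by `blobStatCollector` (atomic increments on write, lazy reads through `expvar`) works the same outside the registry. A self-contained sketch with a simplified metric set (names are ours):

```go
package main

import (
	"expvar"
	"fmt"
	"sync/atomic"
)

// stats is bumped with atomic adds, like the deleted blobStatCollector;
// expvar reads it lazily, so slightly stale values are fine.
var stats struct{ requests, hits uint64 }

func hit() {
	atomic.AddUint64(&stats.requests, 1)
	atomic.AddUint64(&stats.hits, 1)
}

func main() {
	expvar.Publish("blobcache", expvar.Func(func() interface{} {
		return map[string]uint64{
			"requests": atomic.LoadUint64(&stats.requests),
			"hits":     atomic.LoadUint64(&stats.hits),
		}
	}))
	hit()
	fmt.Println(expvar.Get("blobcache").String()) // {"hits":1,"requests":1}
}
```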
- return blobStatterCacheMetrics - })) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobserver.go b/vendor/github.com/docker/distribution/registry/storage/blobserver.go deleted file mode 100644 index 739bf3cb3..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobserver.go +++ /dev/null @@ -1,78 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" -) - -// TODO(stevvooe): This should be configurable in the future. -const blobCacheControlMaxAge = 365 * 24 * time.Hour - -// blobServer simply serves blobs from a driver instance using a path function -// to identify paths and a descriptor service to fill in metadata. -type blobServer struct { - driver driver.StorageDriver - statter distribution.BlobStatter - pathFn func(dgst digest.Digest) (string, error) - redirect bool // allows disabling URLFor redirects -} - -func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return err - } - - path, err := bs.pathFn(desc.Digest) - if err != nil { - return err - } - - if bs.redirect { - redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - switch err.(type) { - case nil: - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) - return err - - case driver.ErrUnsupportedMethod: - // Fallback to serving the content directly. - default: - // Some unexpected error. - return err - } - } - - br, err := newFileReader(ctx, bs.driver, path, desc.Size) - if err != nil { - return err - } - defer br.Close() - - w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) - - if w.Header().Get("Docker-Content-Digest") == "" { - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - } - - if w.Header().Get("Content-Type") == "" { - // Set the content type if not already set. - w.Header().Set("Content-Type", desc.MediaType) - } - - if w.Header().Get("Content-Length") == "" { - // Set the content length if not already set. - w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) - } - - http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go deleted file mode 100644 index 4a16488b8..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobstore.go +++ /dev/null @@ -1,223 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" -) - -// blobStore implements the read side of the blob store interface over a -// driver without enforcing per-repository membership. This object is -// intentionally a leaky abstraction, providing utility methods that support -// creating and traversing backend links. -type blobStore struct { - driver driver.StorageDriver - statter distribution.BlobStatter -} - -var _ distribution.BlobProvider = &blobStore{} - -// Get implements the BlobReadService.Get call.
-func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - bp, err := bs.path(dgst) - if err != nil { - return nil, err - } - - p, err := getContent(ctx, bs.driver, bp) - if err != nil { - switch err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUnknown - } - - return nil, err - } - - return p, nil -} - -func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - desc, err := bs.statter.Stat(ctx, dgst) - if err != nil { - return nil, err - } - - path, err := bs.path(desc.Digest) - if err != nil { - return nil, err - } - - return newFileReader(ctx, bs.driver, path, desc.Size) -} - -// Put stores the content p in the blob store, calculating the digest. If the -// content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. This is implemented as a convenience for other Put implementations -func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - desc, err := bs.statter.Stat(ctx, dgst) - if err == nil { - // content already present - return desc, nil - } else if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) - // real error, return it - return distribution.Descriptor{}, err - } - - bp, err := bs.path(dgst) - if err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype here, as well. - return distribution.Descriptor{ - Size: int64(len(p)), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - }, bs.driver.PutContent(ctx, bp, p) -} - -func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { - - specPath, err := pathFor(blobsPathSpec{}) - if err != nil { - return err - } - - err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { - // skip directories - if fileInfo.IsDir() { - return nil - } - - currentPath := fileInfo.Path() - // we only want to parse paths that end with /data - _, fileName := path.Split(currentPath) - if fileName != "data" { - return nil - } - - digest, err := digestFromPath(currentPath) - if err != nil { - return err - } - - return ingester(digest) - }) - return err -} - -// path returns the canonical path for the blob identified by digest. The blob -// may or may not exist. -func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return "", err - } - - return bp, nil -} - -// link links the path to the provided digest by writing the digest into the -// target file. Caller must ensure that the blob actually exists. -func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { - // The contents of the "link" file are the exact string contents of the - // digest, which is specified in that package. - return bs.driver.PutContent(ctx, path, []byte(dgst)) -} - -// readlink returns the linked digest at path.
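What makes `Put` safe to short-circuit is that the blob store is content-addressable: the storage path is a pure function of the digest, so identical bytes always land in the same place. A stdlib-only sketch of that property (the exact layout string is illustrative, not lifted from `pathFor`):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// blobPath derives a storage path purely from the content's sha256, the
// property the deleted blobStore.Put relies on to detect "already present".
func blobPath(content []byte) string {
	sum := sha256.Sum256(content)
	hexDigest := hex.EncodeToString(sum[:])
	return fmt.Sprintf("/docker/registry/v2/blobs/sha256/%s/%s/data",
		hexDigest[:2], hexDigest)
}

func main() {
	a := blobPath([]byte("hello"))
	b := blobPath([]byte("hello"))
	fmt.Println(a == b, a) // true: same bytes, same path
}
```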
-func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(ctx, path) - if err != nil { - return "", err - } - - linked, err := digest.Parse(string(content)) - if err != nil { - return "", err - } - - return linked, nil -} - -// resolve reads the digest link at path and returns the blob store path. -func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { - dgst, err := bs.readlink(ctx, path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - -type blobStatter struct { - driver driver.StorageDriver -} - -var _ distribution.BlobDescriptorService = &blobStatter{} - -// Stat implements BlobStatter.Stat by returning the descriptor for the blob -// in the main blob store. If this method returns successfully, there is -// a strong guarantee that the blob exists and is available. -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - path, err := pathFor(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return distribution.Descriptor{}, err - } - - fi, err := bs.driver.Stat(ctx, path) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return distribution.Descriptor{}, distribution.ErrBlobUnknown - default: - return distribution.Descriptor{}, err - } - } - - if fi.IsDir() { - // NOTE(stevvooe): This represents a corruption situation. Somehow, we - // calculated a blob path and then detected a directory. We log the - // error and then err on the side of not knowing about the blob. - context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - // TODO(stevvooe): Add method to resolve the mediatype. We can store and - // cache a "global" media type for the blob, even if a specific repo has a - // mediatype that overrides the main one. - - return distribution.Descriptor{ - Size: fi.Size(), - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - }, nil -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - return distribution.ErrUnsupported -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return distribution.ErrUnsupported -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go deleted file mode 100644 index 392b166c6..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go +++ /dev/null @@ -1,400 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "io" - "path" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -var ( - errResumableDigestNotAvailable = errors.New("resumable digest not available") -) - -const ( - // digestSha256Empty is the canonical sha256 digest of empty data - digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - -// blobWriter is used to control the various aspects of resumable -// blob upload.
-type blobWriter struct { - ctx context.Context - blobStore *linkedBlobStore - - id string - startedAt time.Time - digester digest.Digester - written int64 // track the contiguous write - - fileWriter storagedriver.FileWriter - driver storagedriver.StorageDriver - path string - - resumableDigestEnabled bool - committed bool -} - -var _ distribution.BlobWriter = &blobWriter{} - -// ID returns the identifier for this upload. -func (bw *blobWriter) ID() string { - return bw.id -} - -func (bw *blobWriter) StartedAt() time.Time { - return bw.startedAt -} - -// Commit marks the upload as completed, returning a valid descriptor. The -// final size and digest are checked against the first descriptor provided. -func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - context.GetLogger(ctx).Debug("(*blobWriter).Commit") - - if err := bw.fileWriter.Commit(); err != nil { - return distribution.Descriptor{}, err - } - - bw.Close() - desc.Size = bw.Size() - - canonical, err := bw.validateBlob(ctx, desc) - if err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.moveBlob(ctx, canonical); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { - return distribution.Descriptor{}, err - } - - if err := bw.removeResources(ctx); err != nil { - return distribution.Descriptor{}, err - } - - err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) - if err != nil { - return distribution.Descriptor{}, err - } - - bw.committed = true - return canonical, nil -} - -// Cancel the blob upload process, releasing any resources associated with -// the writer and canceling the operation. -func (bw *blobWriter) Cancel(ctx context.Context) error { - context.GetLogger(ctx).Debug("(*blobWriter).Cancel") - if err := bw.fileWriter.Cancel(); err != nil { - return err - } - - if err := bw.Close(); err != nil { - context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) - } - - if err := bw.removeResources(ctx); err != nil { - return err - } - - return nil -} - -func (bw *blobWriter) Size() int64 { - return bw.fileWriter.Size() -} - -func (bw *blobWriter) Write(p []byte) (int, error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) - bw.written += int64(n) - - return n, err -} - -func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. 
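The key trick in `Write`/`ReadFrom` is hashing while writing: every byte is fanned out to both the backing file and the digester through `io.MultiWriter`, so the digest is ready at commit time with no second pass. The same idea with stdlib hashing in place of the vendored digester:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	var file bytes.Buffer // stands in for the storage driver's FileWriter
	h := sha256.New()

	// Fan every byte out to the file and the hash at once, as the deleted
	// blobWriter does with its digester.
	w := io.MultiWriter(&file, h)
	n, err := io.Copy(w, bytes.NewReader([]byte("layer data")))
	if err != nil {
		panic(err)
	}

	fmt.Printf("wrote %d bytes, digest sha256:%x\n", n, h.Sum(nil))
}
```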
- if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { - return 0, err - } - - nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) - bw.written += nn - - return nn, err -} - -func (bw *blobWriter) Close() error { - if bw.committed { - return errors.New("blobwriter close after commit") - } - - if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { - return err - } - - return bw.fileWriter.Close() -} - -// validateBlob checks the data against the digest, returning an error if it -// does not match. The canonical descriptor is returned. -func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - var ( - verified, fullHash bool - canonical digest.Digest - ) - - if desc.Digest == "" { - // if no descriptors are provided, we have nothing to validate - // against. We don't really want to support this for the registry. - return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Reason: fmt.Errorf("cannot validate against empty digest"), - } - } - - var size int64 - - // Stat the on disk file - if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): We really don't care if the file is - // not actually present for the reader. We now assume - // that the desc length is zero. - desc.Size = 0 - default: - // Any other error we want propagated up the stack. - return distribution.Descriptor{}, err - } - } else { - if fi.IsDir() { - return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) - } - - size = fi.Size() - } - - if desc.Size > 0 { - if desc.Size != size { - return distribution.Descriptor{}, distribution.ErrBlobInvalidLength - } - } else { - // if provided 0 or negative length, we can assume the caller doesn't know or - // care about length. - desc.Size = size - } - - // TODO(stevvooe): This section is very meandering. It needs to be broken down - // to be a lot clearer. - - if err := bw.resumeDigest(ctx); err == nil { - canonical = bw.digester.Digest() - - if canonical.Algorithm() == desc.Digest.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = desc.Digest == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. - fullHash = true - } - } else if err == errResumableDigestNotAvailable { - // Not using resumable digests, so we need to hash the entire layer. - fullHash = true - } else { - return distribution.Descriptor{}, err - } - - if fullHash { - // a fantastic optimization: if the written data and the size are - // the same, we don't need to read the data from the backend. This is - // because we've written the entire file in the lifecycle of the - // current instance. - if bw.written == size && digest.Canonical == desc.Digest.Algorithm() { - canonical = bw.digester.Digest() - verified = desc.Digest == canonical - } - - // If the check based on size fails, we fall back to the slowest of - // paths. We may be able to make the size-based check a stronger - // guarantee, so this may be defensive.
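The slow path that follows re-reads the stored bytes and checks them against the digest the client claimed. Reduced to stdlib primitives, the check looks like this (the vendored code uses a `digest.Verifier` instead of a direct hash comparison):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"crypto/subtle"
	"fmt"
	"io"
)

func main() {
	content := []byte("layer data")
	expected := sha256.Sum256(content) // digest the client claimed

	// Stream the stored bytes through the hash, then compare.
	h := sha256.New()
	if _, err := io.Copy(h, bytes.NewReader(content)); err != nil {
		panic(err)
	}
	verified := subtle.ConstantTimeCompare(h.Sum(nil), expected[:]) == 1
	fmt.Println(verified) // true
}
```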
- if !verified { - digester := digest.Canonical.Digester() - verifier := desc.Digest.Verifier() - - // Read the file from the backend driver and validate it. - fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size) - if err != nil { - return distribution.Descriptor{}, err - } - defer fr.Close() - - tr := io.TeeReader(fr, digester.Hash()) - - if _, err := io.Copy(verifier, tr); err != nil { - return distribution.Descriptor{}, err - } - - canonical = digester.Digest() - verified = verifier.Verified() - } - } - - if !verified { - context.GetLoggerWithFields(ctx, - map[interface{}]interface{}{ - "canonical": canonical, - "provided": desc.Digest, - }, "canonical", "provided"). - Errorf("canonical digest does not match provided digest") - return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ - Digest: desc.Digest, - Reason: fmt.Errorf("content does not match digest"), - } - } - - // update desc with canonical hash - desc.Digest = canonical - - if desc.MediaType == "" { - desc.MediaType = "application/octet-stream" - } - - return desc, nil -} - -// moveBlob moves the data into its final, hash-qualified destination, -// identified by dgst. The layer should be validated before commencing the -// move. -func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { - blobPath, err := pathFor(blobDataPathSpec{ - digest: desc.Digest, - }) - - if err != nil { - return err - } - - // Check for existence - if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // ensure that it doesn't exist. - default: - return err - } - } else { - // If the path exists, we can assume that the content has already - // been uploaded, since the blob storage is content-addressable. - // While it may be corrupted, detection of such corruption belongs - // elsewhere. - return nil - } - - // If no data was received, we may not actually have a file on disk. Check - // the size here and write a zero-length file to blobPath if this is the - // case. For the most part, this should only ever happen with zero-length - // blobs. - if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // HACK(stevvooe): This is slightly dangerous: if we verify above, - // get a hash, then the underlying file is deleted, we risk moving - // a zero-length blob into a nonzero-length blob location. To - // prevent this horrid thing, we employ the hack of only allowing - // this to happen for the digest of an empty blob. - if desc.Digest == digestSha256Empty { - return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) - } - - // We let this fail during the move below. - logrus. - WithField("upload.id", bw.ID()). - WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") - default: - return err // unrelated error - } - } - - // TODO(stevvooe): We should also write the mediatype when executing this move. - - return bw.blobStore.driver.Move(ctx, bw.path, blobPath) -} - -// removeResources should clean up all resources associated with the upload -// instance. An error will be returned if the clean up cannot proceed. If the -// resources are already not present, no error will be returned.
-func (bw *blobWriter) removeResources(ctx context.Context) error { - dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Named().Name(), - id: bw.id, - }) - - if err != nil { - return err - } - - // Resolve and delete the containing directory, which should include any - // upload-related files. - dirPath := path.Dir(dataPath) - if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // already gone! - default: - // This should be uncommon enough such that returning an error - // should be okay. At this point, the upload should be mostly - // complete, but perhaps the backend became inaccessible. - context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} - -func (bw *blobWriter) Reader() (io.ReadCloser, error) { - // TODO(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 - try := 1 - for try <= 5 { - _, err := bw.driver.Stat(bw.ctx, bw.path) - if err == nil { - break - } - switch err.(type) { - case storagedriver.PathNotFoundError: - context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) - time.Sleep(1 * time.Second) - try++ - default: - return nil, err - } - } - - readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) - if err != nil { - return nil, err - } - - return readCloser, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go deleted file mode 100644 index 32f130974..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build noresumabledigest - -package storage - -import ( - "github.com/docker/distribution/context" -) - -// resumeDigest is a noop when resumable digest support is disabled. -func (bw *blobWriter) resumeDigest(ctx context.Context) error { - return errResumableDigestNotAvailable -} - -// storeHashState is a noop when resumable digest support is disabled. -func (bw *blobWriter) storeHashState(ctx context.Context) error { - return errResumableDigestNotAvailable -} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go deleted file mode 100644 index cc1a833d0..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build !noresumabledigest - -package storage - -import ( - "fmt" - "path" - "strconv" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/sirupsen/logrus" - "github.com/stevvooe/resumable" - - // register resumable hashes with import - _ "github.com/stevvooe/resumable/sha256" - _ "github.com/stevvooe/resumable/sha512" -) - -// resumeDigest attempts to restore the state of the internal hash function -// by loading the most recent saved hash state equal to the current size of the blob. -func (bw *blobWriter) resumeDigest(ctx context.Context) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - offset := bw.fileWriter.Size() - if offset == int64(h.Len()) { - // State of digester is already at the requested offset.
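The `Reader` method above still sleeps a flat second between stat attempts; its own TODO asks for exponential backoff with i=0.5, e=2, n=4. A sketch of what that TODO describes (the stat callback and error text are placeholders):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// statWithBackoff retries a stat with the growth the deleted TODO asks for:
// initial delay 0.5s, doubling each attempt, at most 4 attempts.
func statWithBackoff(stat func() error) error {
	delay := 500 * time.Millisecond
	for try := 0; try < 4; try++ {
		if err := stat(); err == nil {
			return nil
		}
		time.Sleep(delay)
		delay *= 2 // 0.5s, 1s, 2s, 4s
	}
	return errors.New("path still absent after retries")
}

func main() {
	calls := 0
	err := statWithBackoff(func() error {
		calls++
		if calls < 3 {
			return errors.New("not found yet")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```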
- return nil - } - - // List hash states from storage backend. - var hashStateMatch hashStateEntry - hashStates, err := bw.getStoredHashStates(ctx) - if err != nil { - return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) - } - - // Find the highest stored hashState with offset equal to - // the requested offset. - for _, hashState := range hashStates { - if hashState.offset == offset { - hashStateMatch = hashState - break // Found an exact offset match. - } - } - - if hashStateMatch.offset == 0 { - // No need to load any state, just reset the hasher. - h.Reset() - } else { - storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) - if err != nil { - return err - } - - if err = h.Restore(storedState); err != nil { - return err - } - } - - // Mind the gap. - if gapLen := offset - int64(h.Len()); gapLen > 0 { - return errResumableDigestNotAvailable - } - - return nil -} - -type hashStateEntry struct { - offset int64 - path string -} - -// getStoredHashStates returns a slice of hashStateEntries for this upload. -func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Named().String(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - list: true, - }) - - if err != nil { - return nil, err - } - - paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) - if err != nil { - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - return nil, err - } - // Treat PathNotFoundError as no entries. - paths = nil - } - - hashStateEntries := make([]hashStateEntry, 0, len(paths)) - - for _, p := range paths { - pathSuffix := path.Base(p) - // The suffix should be the offset. - offset, err := strconv.ParseInt(pathSuffix, 0, 64) - if err != nil { - logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) - } - - hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) - } - - return hashStateEntries, nil -} - -func (bw *blobWriter) storeHashState(ctx context.Context) error { - if !bw.resumableDigestEnabled { - return errResumableDigestNotAvailable - } - - h, ok := bw.digester.Hash().(resumable.Hash) - if !ok { - return errResumableDigestNotAvailable - } - - uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Named().String(), - id: bw.id, - alg: bw.digester.Digest().Algorithm(), - offset: int64(h.Len()), - }) - - if err != nil { - return err - } - - hashState, err := h.State() - if err != nil { - return err - } - - return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog.go b/vendor/github.com/docker/distribution/registry/storage/catalog.go deleted file mode 100644 index 0b59a39ac..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/catalog.go +++ /dev/null @@ -1,153 +0,0 @@ -package storage - -import ( - "errors" - "io" - "path" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// errFinishedWalk signals an early exit to the walk when the current query -// is satisfied. -var errFinishedWalk = errors.New("finished walk") - -// Returns a list, or partial list, of repositories in the registry. -// Because it's a quite expensive operation, it should only be used when building up -// an initial set of repositories. 
-func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { - var foundRepos []string - - if len(repos) == 0 { - return 0, errors.New("no space in slice") - } - - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return 0, err - } - - err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { - err := handleRepository(fileInfo, root, last, func(repoPath string) error { - foundRepos = append(foundRepos, repoPath) - return nil - }) - if err != nil { - return err - } - - // if we've filled our array, no need to walk any further - if len(foundRepos) == len(repos) { - return errFinishedWalk - } - - return nil - }) - - n = copy(repos, foundRepos) - - switch err { - case nil: - // nil means that we completed walk and didn't fill buffer. No more - // records are available. - err = io.EOF - case errFinishedWalk: - // more records are available. - err = nil - } - - return n, err -} - -// Enumerate applies ingester to each repository -func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error { - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return err - } - - err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { - return handleRepository(fileInfo, root, "", ingester) - }) - - return err -} - -// lessPath returns true if path a is less than path b. -// -// A component-wise comparison is done, rather than the lexical comparison of -// strings. -func lessPath(a, b string) bool { - // we provide this behavior by making the separator always sort first. - return compareReplaceInline(a, b, '/', '\x00') < 0 -} - -// compareReplaceInline modifies runtime.cmpstring to replace old with new -// during a byte-wise comparison. -func compareReplaceInline(s1, s2 string, old, new byte) int { - // TODO(stevvooe): We are missing an optimization when the s1 and s2 have - // the exact same slice header. It will make the code unsafe but can - // provide some extra performance. - - l := len(s1) - if len(s2) < l { - l = len(s2) - } - - for i := 0; i < l; i++ { - c1, c2 := s1[i], s2[i] - if c1 == old { - c1 = new - } - - if c2 == old { - c2 = new - } - - if c1 < c2 { - return -1 - } - - if c1 > c2 { - return +1 - } - } - - if len(s1) < len(s2) { - return -1 - } - - if len(s1) > len(s2) { - return +1 - } - - return 0 -} - -// handleRepository calls function fn with a repository path if fileInfo -// has the path of a repository under root and that path is lexicographically -// after last. Otherwise, it will return ErrSkipDir. This should be used -// with Walk to do handling with repositories in a storage. -func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error { - filePath := fileInfo.Path() - - // lop the base path off - repo := filePath[len(root)+1:] - - _, file := path.Split(repo) - if file == "_layers" { - repo = strings.TrimSuffix(repo, "/_layers") - if lessPath(last, repo) { - if err := fn(repo); err != nil { - return err - } - } - return ErrSkipDir - } else if strings.HasPrefix(file, "_") { - return ErrSkipDir - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/doc.go b/vendor/github.com/docker/distribution/registry/storage/doc.go deleted file mode 100644 index 387d92348..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package storage contains storage services for use in the registry -// application.
It should be considered an internal package, as of Go 1.4. -package storage diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader.go b/vendor/github.com/docker/distribution/registry/storage/filereader.go deleted file mode 100644 index 3b06c8179..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/filereader.go +++ /dev/null @@ -1,177 +0,0 @@ -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// TODO(stevvooe): Set an optimal buffer size here. We'll have to -// understand the latency characteristics of the underlying network to -// set this correctly, so we may want to leave it to the driver. For -// out of process drivers, we'll have to optimize this buffer size for -// local communication. -const fileReaderBufferSize = 4 << 20 - -// fileReader provides a read seeker interface to files stored in -// storagedriver. Used to implement part of layer interface and will be used -// to implement read side of LayerUpload. -type fileReader struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - size int64 // size is the total size, must be set. - - // mutable fields - rc io.ReadCloser // remote read closer - brd *bufio.Reader // internal buffered io - offset int64 // offset is the current read offset - err error // terminal error, if set, reader is closed -} - -// newFileReader initializes a file reader for the remote file. The reader -// takes on the size and path that must be determined externally with a stat -// call. The reader operates optimistically, assuming that the file is already -// there. -func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { - return &fileReader{ - ctx: ctx, - driver: driver, - path: path, - size: size, - }, nil -} - -func (fr *fileReader) Read(p []byte) (n int, err error) { - if fr.err != nil { - return 0, fr.err - } - - rd, err := fr.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - fr.offset += int64(n) - - // Simulate io.EOF error if we reach filesize. - if err == nil && fr.offset >= fr.size { - err = io.EOF - } - - return n, err -} - -func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { - if fr.err != nil { - return 0, fr.err - } - - var err error - newOffset := fr.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fr.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - if fr.offset != newOffset { - fr.reset() - } - - // No problems, set the offset. - fr.offset = newOffset - } - - return fr.offset, err -} - -func (fr *fileReader) Close() error { - return fr.closeWithErr(fmt.Errorf("fileReader: closed")) -} - -// reader prepares the current reader at the current offset, ensuring it's buffered -// and ready to go. -func (fr *fileReader) reader() (io.Reader, error) { - if fr.err != nil { - return nil, fr.err - } - - if fr.rc != nil { - return fr.brd, nil - } - - // If we don't have a reader, open one up.
- rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): If the path is not found, we simply return a - // reader that returns io.EOF. However, we do not set fr.rc, - // allowing future attempts at getting a reader to possibly - // succeed if the file turns up later. - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - default: - return nil, err - } - } - - fr.rc = rc - - if fr.brd == nil { - fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) - } else { - fr.brd.Reset(fr.rc) - } - - return fr.brd, nil -} - -// resetReader resets the reader, forcing the read method to open up a new -// connection and rebuild the buffered reader. This should be called when the -// offset and the reader will become out of sync, such as during a seek -// operation. -func (fr *fileReader) reset() { - if fr.err != nil { - return - } - if fr.rc != nil { - fr.rc.Close() - fr.rc = nil - } -} - -func (fr *fileReader) closeWithErr(err error) error { - if fr.err != nil { - return fr.err - } - - fr.err = err - - // close and release reader chain - if fr.rc != nil { - fr.rc.Close() - } - - fr.rc = nil - fr.brd = nil - - return fr.err -} diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go deleted file mode 100644 index 392898933..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go +++ /dev/null @@ -1,114 +0,0 @@ -package storage - -import ( - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" -) - -func emit(format string, a ...interface{}) { - fmt.Printf(format+"\n", a...) 
-} - -// MarkAndSweep performs a mark and sweep of registry data -func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { - repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) - if !ok { - return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") - } - - // mark - markSet := make(map[digest.Digest]struct{}) - err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { - emit(repoName) - - var err error - named, err := reference.WithName(repoName) - if err != nil { - return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) - } - repository, err := registry.Repository(ctx, named) - if err != nil { - return fmt.Errorf("failed to construct repository: %v", err) - } - - manifestService, err := repository.Manifests(ctx) - if err != nil { - return fmt.Errorf("failed to construct manifest service: %v", err) - } - - manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) - if !ok { - return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") - } - - err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { - // Mark the manifest's blob - emit("%s: marking manifest %s ", repoName, dgst) - markSet[dgst] = struct{}{} - - manifest, err := manifestService.Get(ctx, dgst) - if err != nil { - return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err) - } - - descriptors := manifest.References() - for _, descriptor := range descriptors { - markSet[descriptor.Digest] = struct{}{} - emit("%s: marking blob %s", repoName, descriptor.Digest) - } - - return nil - }) - - if err != nil { - // In certain situations such as unfinished uploads, deleting all - // tags in S3 or removing the _manifests folder manually, this - // error may be of type PathNotFound. - // - // In these cases we can continue marking other manifests safely. - if _, ok := err.(driver.PathNotFoundError); ok { - return nil - } - } - - return err - }) - - if err != nil { - return fmt.Errorf("failed to mark: %v", err) - } - - // sweep - blobService := registry.Blobs() - deleteSet := make(map[digest.Digest]struct{}) - err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { - // check if digest is in markSet. If not, delete it! 
- if _, ok := markSet[dgst]; !ok { - deleteSet[dgst] = struct{}{} - } - return nil - }) - if err != nil { - return fmt.Errorf("error enumerating blobs: %v", err) - } - emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) - // Construct vacuum - vacuum := NewVacuum(ctx, storageDriver) - for dgst := range deleteSet { - emit("blob eligible for deletion: %s", dgst) - if dryRun { - continue - } - err = vacuum.RemoveBlob(string(dgst)) - if err != nil { - return fmt.Errorf("failed to delete blob %s: %v", dgst, err) - } - } - - return err -} diff --git a/vendor/github.com/docker/distribution/registry/storage/io.go b/vendor/github.com/docker/distribution/registry/storage/io.go deleted file mode 100644 index c1be3b771..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/io.go +++ /dev/null @@ -1,71 +0,0 @@ -package storage - -import ( - "errors" - "io" - "io/ioutil" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -const ( - maxBlobGetSize = 4 << 20 -) - -func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) { - r, err := driver.Reader(ctx, p, 0) - if err != nil { - return nil, err - } - - return readAllLimited(r, maxBlobGetSize) -} - -func readAllLimited(r io.Reader, limit int64) ([]byte, error) { - r = limitReader(r, limit) - return ioutil.ReadAll(r) -} - -// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader, -// this returns an error when the limit reached. -func limitReader(r io.Reader, n int64) io.Reader { - return &limitedReader{r: r, n: n} -} - -// limitedReader implements a reader that errors when the limit is reached. -// -// Partially cribbed from net/http.MaxBytesReader. -type limitedReader struct { - r io.Reader // underlying reader - n int64 // max bytes remaining - err error // sticky error -} - -func (l *limitedReader) Read(p []byte) (n int, err error) { - if l.err != nil { - return 0, l.err - } - if len(p) == 0 { - return 0, nil - } - // If they asked for a 32KB byte read but only 5 bytes are - // remaining, no need to read 32KB. 6 bytes will answer the - // question of the whether we hit the limit or go past it. - if int64(len(p)) > l.n+1 { - p = p[:l.n+1] - } - n, err = l.r.Read(p) - - if int64(n) <= l.n { - l.n -= int64(n) - l.err = err - return n, err - } - - n = int(l.n) - l.n = 0 - - l.err = errors.New("storage: read exceeds limit") - return n, l.err -} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go deleted file mode 100644 index a1929eed3..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go +++ /dev/null @@ -1,470 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "path" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" - "github.com/opencontainers/go-digest" -) - -// linkPathFunc describes a function that can resolve a link based on the -// repository name and digest. -type linkPathFunc func(name string, dgst digest.Digest) (string, error) - -// linkedBlobStore provides a full BlobService that namespaces the blobs to a -// given repository. Effectively, it manages the links in a given repository -// that grant access to the global blob store. 
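The `limitedReader` in the removed io.go is worth a closer look: unlike `io.LimitReader`, which silently stops at the boundary, it reads one byte past the limit so it can tell "exactly n bytes" apart from "more than n", then reports a sticky error. A cut-down sketch of that behavior (error text and sample input are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// limited errors once more than n bytes are available from the source.
type limited struct {
	r   io.Reader
	n   int64 // max bytes remaining
	err error // sticky error
}

func (l *limited) Read(p []byte) (int, error) {
	if l.err != nil {
		return 0, l.err
	}
	if int64(len(p)) > l.n+1 {
		p = p[:l.n+1] // one extra byte answers whether we go past the limit
	}
	n, err := l.r.Read(p)
	if int64(n) <= l.n {
		l.n -= int64(n)
		l.err = err
		return n, err
	}
	n = int(l.n)
	l.n = 0
	l.err = errors.New("read exceeds limit")
	return n, l.err
}

func main() {
	r := &limited{r: strings.NewReader("0123456789"), n: 4}
	b, err := io.ReadAll(r)
	fmt.Printf("%q %v\n", b, err) // "0123" read exceeds limit
}
```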
-type linkedBlobStore struct { - *blobStore - registry *registry - blobServer distribution.BlobServer - blobAccessController distribution.BlobDescriptorService - repository distribution.Repository - ctx context.Context // only to be used where context can't come through method args - deleteEnabled bool - resumableDigestEnabled bool - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes. - linkPathFns []linkPathFunc - - // linkDirectoryPathSpec locates the root directories in which one might find links - linkDirectoryPathSpec pathSpec -} - -var _ distribution.BlobStore = &linkedBlobStore{} - -func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return lbs.blobAccessController.Stat(ctx, dgst) -} - -func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Get(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return nil, err - } - - return lbs.blobStore.Open(ctx, canonical.Digest) -} - -func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - canonical, err := lbs.Stat(ctx, dgst) // access check - if err != nil { - return err - } - - if canonical.MediaType != "" { - // Set the repository local content type. - w.Header().Set("Content-Type", canonical.MediaType) - } - - return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) -} - -func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst := digest.FromBytes(p) - // Place the data in the blob store first. - desc, err := lbs.blobStore.Put(ctx, mediaType, p) - if err != nil { - context.GetLogger(ctx).Errorf("error putting into main store: %v", err) - return distribution.Descriptor{}, err - } - - if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { - return distribution.Descriptor{}, err - } - - // TODO(stevvooe): Write out mediatype if incoming differs from what is - // returned by Put above. Note that we should allow updates for a given - // repository. - - return desc, lbs.linkBlob(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -// Writer begins a blob write session, returning a handle. 
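The `optionFunc` adapter and `WithMountFrom` above show how a blob-create option smuggles settings into a concrete `CreateOptions` struct through an `interface{}` type assertion. A self-contained sketch of that pattern (the reference is simplified to a plain repo name instead of a `reference.Canonical`):

```go
package main

import (
	"errors"
	"fmt"
)

// CreateOptions mirrors the options struct the real code asserts to.
type CreateOptions struct {
	Mount struct {
		ShouldMount bool
		From        string
	}
}

// BlobCreateOption applies itself to an opaque options value.
type BlobCreateOption interface {
	Apply(interface{}) error
}

// optionFunc adapts a plain function to the BlobCreateOption interface.
type optionFunc func(interface{}) error

func (f optionFunc) Apply(v interface{}) error { return f(v) }

// WithMountFrom marks the create request as a cross-repository mount.
func WithMountFrom(ref string) BlobCreateOption {
	return optionFunc(func(v interface{}) error {
		opts, ok := v.(*CreateOptions)
		if !ok {
			return errors.New("unexpected options type")
		}
		opts.Mount.ShouldMount = true
		opts.Mount.From = ref
		return nil
	})
}

func main() {
	var opts CreateOptions
	if err := WithMountFrom("library/ubuntu").Apply(&opts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts.Mount) // {ShouldMount:true From:library/ubuntu}
}
```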
-func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") - - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - if opts.Mount.ShouldMount { - desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat) - if err == nil { - // Mount successful, no need to initiate an upload session - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - } - } - - uuid := uuid.Generate().String() - startedAt := time.Now().UTC() - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Named().Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Named().Name(), - id: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) -} - -func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") - - startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Named().Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return nil, distribution.ErrBlobUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Named().Name(), - id: id, - }) - - if err != nil { - return nil, err - } - - return lbs.newBlobUpload(ctx, id, path, startedAt, true) -} - -func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - if !lbs.deleteEnabled { - return distribution.ErrUnsupported - } - - // Ensure the blob is available for deletion - _, err := lbs.blobAccessController.Stat(ctx, dgst) - if err != nil { - return err - } - - err = lbs.blobAccessController.Clear(ctx, dgst) - if err != nil { - return err - } - - return nil -} - -func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error { - rootPath, err := pathFor(lbs.linkDirectoryPathSpec) - if err != nil { - return err - } - err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error { - // exit early if directory... 
- if fileInfo.IsDir() { - return nil - } - filePath := fileInfo.Path() - - // check if it's a link - _, fileName := path.Split(filePath) - if fileName != "link" { - return nil - } - - // read the digest found in link - digest, err := lbs.blobStore.readlink(ctx, filePath) - if err != nil { - return err - } - - // ensure this conforms to the linkPathFns - _, err = lbs.Stat(ctx, digest) - if err != nil { - // we expect this error to occur so we move on - if err == distribution.ErrBlobUnknown { - return nil - } - return err - } - - err = ingestor(digest) - if err != nil { - return err - } - - return nil - }) - - if err != nil { - return err - } - - return nil -} - -func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) { - var stat distribution.Descriptor - if sourceStat == nil { - // look up the blob info from the sourceRepo if not already provided - repo, err := lbs.registry.Repository(ctx, sourceRepo) - if err != nil { - return distribution.Descriptor{}, err - } - stat, err = repo.Blobs(ctx).Stat(ctx, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - } else { - // use the provided blob info - stat = *sourceStat - } - - desc := distribution.Descriptor{ - Size: stat.Size, - - // NOTE(stevvooe): The central blob store firewalls media types from - // other users. The caller should look this up and override the value - // for the specific repository. - MediaType: "application/octet-stream", - Digest: dgst, - } - return desc, lbs.linkBlob(ctx, desc) -} - -// newBlobUpload allocates a new upload controller with the given state. -func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) { - fw, err := lbs.driver.Writer(ctx, path, append) - if err != nil { - return nil, err - } - - bw := &blobWriter{ - ctx: ctx, - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.Digester(), - fileWriter: fw, - driver: lbs.driver, - path: path, - resumableDigestEnabled: lbs.resumableDigestEnabled, - } - - return bw, nil -} - -// linkBlob links a valid, written blob into the registry under the named -// repository for the upload controller. -func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { - dgsts := append([]digest.Digest{canonical.Digest}, aliases...) - - // TODO(stevvooe): Need to write out mediatype for only canonical hash - // since we don't care about the aliases. They are generally unused except - // for tarsum but those versions don't care about mediatype. - - // Don't make duplicate links. - seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) - - // only use the first link - linkPathFn := lbs.linkPathFns[0] - - for _, dgst := range dgsts { - if _, seen := seenDigests[dgst]; seen { - continue - } - seenDigests[dgst] = struct{}{} - - blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) - if err != nil { - return err - } - - if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { - return err - } - } - - return nil -} - -type linkedBlobStatter struct { - *blobStore - repository distribution.Repository - - // linkPathFns specifies one or more path functions allowing one to - // control the repository blob link set to which the blob store - // dispatches. 
This is required because manifest and layer blobs have not - // yet been fully merged. At some point, this functionality should be - // removed an the blob links folder should be merged. The first entry is - // treated as the "canonical" link location and will be used for writes. - linkPathFns []linkPathFunc -} - -var _ distribution.BlobDescriptorService = &linkedBlobStatter{} - -func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - var ( - found bool - target digest.Digest - ) - - // try the many link path functions until we get success or an error that - // is not PathNotFoundError. - for _, linkPathFn := range lbs.linkPathFns { - var err error - target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) - - if err == nil { - found = true - break // success! - } - - switch err := err.(type) { - case driver.PathNotFoundError: - // do nothing, just move to the next linkPathFn - default: - return distribution.Descriptor{}, err - } - } - - if !found { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - if target != dgst { - // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. - context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) - } - - // TODO(stevvooe): Look up repository local mediatype and replace that on - // the returned descriptor. - - return lbs.blobStore.statter.Stat(ctx, target) -} - -func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { - // clear any possible existence of a link described in linkPathFns - for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) - if err != nil { - return err - } - - err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) - if err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - continue // just ignore this error and continue - default: - return err - } - } - } - - return nil -} - -// resolveTargetWithFunc allows us to read a link to a resource with different -// linkPathFuncs to let us try a few different paths before returning not -// found. -func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) - if err != nil { - return "", err - } - - return lbs.blobStore.readlink(ctx, blobLinkPath) -} - -func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - // The canonical descriptor for a blob is set at the commit phase of upload - return nil -} - -// blobLinkPath provides the path to the blob link, also known as layers. -func blobLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(layerLinkPathSpec{name: name, digest: dgst}) -} - -// manifestRevisionLinkPath provides the path to the manifest revision link. 
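`linkedBlobStatter.Stat` above has to try several historical link locations (a 2.1.0 bug linked manifests into `_layers`), treating path-not-found as a cue to fall through to the next candidate. A standalone sketch of that fallback loop, with a map standing in for the storage driver and a sentinel error standing in for `driver.PathNotFoundError`:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("path not found")

// linkPathFunc mirrors the shape used above: repo name + digest -> link path.
type linkPathFunc func(name, dgst string) (string, error)

// resolve tries each link path function in order, moving on when a location
// does not exist, and aborting on any other error.
func resolve(read func(string) (string, error), fns []linkPathFunc, name, dgst string) (string, error) {
	for _, fn := range fns {
		p, err := fn(name, dgst)
		if err != nil {
			return "", err
		}
		target, err := read(p)
		if err == nil {
			return target, nil // success: the link resolved here
		}
		if !errors.Is(err, errNotFound) {
			return "", err // real errors abort the search
		}
	}
	return "", errors.New("blob unknown")
}

func main() {
	// Only the _layers link exists in this toy store.
	store := map[string]string{"repo/_layers/sha256:abc/link": "sha256:abc"}
	read := func(p string) (string, error) {
		if v, ok := store[p]; ok {
			return v, nil
		}
		return "", errNotFound
	}
	fns := []linkPathFunc{
		func(n, d string) (string, error) { return n + "/_manifests/revisions/" + d + "/link", nil },
		func(n, d string) (string, error) { return n + "/_layers/" + d + "/link", nil },
	}
	fmt.Println(resolve(read, fns, "repo", "sha256:abc")) // sha256:abc <nil>
}
```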
-func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go deleted file mode 100644 index aee73b85f..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go +++ /dev/null @@ -1,92 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/opencontainers/go-digest" -) - -// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. -type manifestListHandler struct { - repository distribution.Repository - blobStore distribution.BlobStore - ctx context.Context -} - -var _ ManifestHandler = &manifestListHandler{} - -func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") - - var m manifestlist.DeserializedManifestList - if err := json.Unmarshal(content, &m); err != nil { - return nil, err - } - - return &m, nil -} - -func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") - - m, ok := manifestList.(*manifestlist.DeserializedManifestList) - if !ok { - return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) - } - - if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { - return "", err - } - - mt, payload, err := m.Payload() - if err != nil { - return "", err - } - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. As a policy, the registry only tries to -// store valid content, leaving trust policies of that content up to -// consumers. -func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if !skipDependencyVerification { - // This manifest service is different from the blob service - // returned by Blob. It uses a linked blob store to ensure that - // only manifests are accessible. - - manifestService, err := ms.repository.Manifests(ctx) - if err != nil { - return err - } - - for _, manifestDescriptor := range mnfst.References() { - exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) - if err != nil && err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - if err != nil || !exists { - // On error here, we always append unknown blob errors. 
- errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go deleted file mode 100644 index 4cca5157a..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go +++ /dev/null @@ -1,141 +0,0 @@ -package storage - -import ( - "fmt" - - "encoding/json" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/opencontainers/go-digest" -) - -// A ManifestHandler gets and puts manifests of a particular type. -type ManifestHandler interface { - // Unmarshal unmarshals the manifest from a byte slice. - Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest. - Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) -} - -// SkipLayerVerification allows a manifest to be Put before its -// layers are on the filesystem -func SkipLayerVerification() distribution.ManifestServiceOption { - return skipLayerOption{} -} - -type skipLayerOption struct{} - -func (o skipLayerOption) Apply(m distribution.ManifestService) error { - if ms, ok := m.(*manifestStore); ok { - ms.skipDependencyVerification = true - return nil - } - return fmt.Errorf("skip layer verification only valid for manifestStore") -} - -type manifestStore struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context - - skipDependencyVerification bool - - schema1Handler ManifestHandler - schema2Handler ManifestHandler - manifestListHandler ManifestHandler -} - -var _ distribution.ManifestService = &manifestStore{} - -func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") - - _, err := ms.blobStore.Stat(ms.ctx, dgst) - if err != nil { - if err == distribution.ErrBlobUnknown { - return false, nil - } - - return false, err - } - - return true, nil -} - -func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") - - // TODO(stevvooe): Need to check descriptor from above to ensure that the - // mediatype is as we expect for the manifest store. 
- - content, err := ms.blobStore.Get(ctx, dgst) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: ms.repository.Named().Name(), - Revision: dgst, - } - } - - return nil, err - } - - var versioned manifest.Versioned - if err = json.Unmarshal(content, &versioned); err != nil { - return nil, err - } - - switch versioned.SchemaVersion { - case 1: - return ms.schema1Handler.Unmarshal(ctx, dgst, content) - case 2: - // This can be an image manifest or a manifest list - switch versioned.MediaType { - case schema2.MediaTypeManifest: - return ms.schema2Handler.Unmarshal(ctx, dgst, content) - case manifestlist.MediaTypeManifestList: - return ms.manifestListHandler.Unmarshal(ctx, dgst, content) - default: - return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} - } - } - - return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) -} - -func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") - - switch manifest.(type) { - case *schema1.SignedManifest: - return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) - case *schema2.DeserializedManifest: - return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) - case *manifestlist.DeserializedManifestList: - return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) - } - - return "", fmt.Errorf("unrecognized manifest type %T", manifest) -} - -// Delete removes the revision of the specified manifest. -func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") - return ms.blobStore.Delete(ctx, dgst) -} - -func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { - err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { - err := ingester(dgst) - if err != nil { - return err - } - return nil - }) - return err -} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go deleted file mode 100644 index b6d9b9b56..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/paths.go +++ /dev/null @@ -1,490 +0,0 @@ -package storage - -import ( - "fmt" - "path" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - storagePathVersion = "v2" // fixed storage layout version - storagePathRoot = "/docker/registry/" // all driver paths have a prefix - - // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we though - // storage path root would configurable for all drivers through this - // package. In reality, we've found it simpler to do this on a per driver - // basis. -) - -// pathFor maps paths based on "object names" and their ids. The "object -// names" mapped by are internal to the storage system. -// -// The path layout in the storage backend is roughly as follows: -// -// /v2 -// -> repositories/ -// ->/ -// -> _manifests/ -// revisions -// -> -// -> link -// tags/ -// -> current/link -// -> index -// -> //link -// -> _layers/ -// -// -> _uploads/ -// data -// startedat -// hashstates// -// -> blob/ -// -// -// The storage backend layout is broken up into a content-addressable blob -// store and repositories. 
The content-addressable blob store holds most data -// throughout the backend, keyed by algorithm and digests of the underlying -// content. Access to the blob store is controlled through links from the -// repository to blobstore. -// -// A repository is made up of layers, manifests and tags. The layers component -// is just a directory of layers which are "linked" into a repository. A layer -// can only be accessed through a qualified repository name if it is linked in -// the repository. Uploads of layers are managed in the uploads directory, -// which is key by upload id. When all data for an upload is received, the -// data is moved into the blob store and the upload directory is deleted. -// Abandoned uploads can be garbage collected by reading the startedat file -// and removing uploads that have been active for longer than a certain time. -// -// The third component of the repository directory is the manifests store, -// which is made up of a revision store and tag store. Manifests are stored in -// the blob store and linked into the revision store. -// While the registry can save all revisions of a manifest, no relationship is -// implied as to the ordering of changes to a manifest. The tag store provides -// support for name, tag lookups of manifests, using "current/link" under a -// named tag directory. An index is maintained to support deletions of all -// revisions of a given manifest tag. -// -// We cover the path formats implemented by this path mapper below. -// -// Manifests: -// -// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ -// manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// -// manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link -// -// Tags: -// -// manifestTagsPathSpec: /v2/repositories//_manifests/tags/ -// manifestTagPathSpec: /v2/repositories//_manifests/tags// -// manifestTagCurrentPathSpec: /v2/repositories//_manifests/tags//current/link -// manifestTagIndexPathSpec: /v2/repositories//_manifests/tags//index/ -// manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// -// manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link -// -// Blobs: -// -// layerLinkPathSpec: /v2/repositories//_layers///link -// -// Uploads: -// -// uploadDataPathSpec: /v2/repositories//_uploads//data -// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat -// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// -// -// Blob Store: -// -// blobsPathSpec: /v2/blobs/ -// blobPathSpec: /v2/blobs/// -// blobDataPathSpec: /v2/blobs////data -// blobMediaTypePathSpec: /v2/blobs////data -// -// For more information on the semantic meaning of each path and their -// contents, please see the path spec documentation. -func pathFor(spec pathSpec) (string, error) { - - // Switch on the path object type and return the appropriate path. At - // first glance, one may wonder why we don't use an interface to - // accomplish this. By keep the formatting separate from the pathSpec, we - // keep separate the path generation componentized. These specs could be - // passed to a completely different mapper implementation and generate a - // different set of paths. - // - // For example, imagine migrating from one backend to the other: one could - // build a filesystem walker that converts a string path in one version, - // to an intermediate path object, than can be consumed and mapped by the - // other version. 
- - rootPrefix := []string{storagePathRoot, storagePathVersion} - repoPrefix := append(rootPrefix, "repositories") - - switch v := spec.(type) { - - case manifestRevisionsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil - - case manifestRevisionPathSpec: - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil - case manifestRevisionLinkPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestTagsPathSpec: - return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil - case manifestTagPathSpec: - root, err := pathFor(manifestTagsPathSpec{ - name: v.name, - }) - - if err != nil { - return "", err - } - - return path.Join(root, v.tag), nil - case manifestTagCurrentPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "current", "link"), nil - case manifestTagIndexPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "index"), nil - case manifestTagIndexEntryLinkPathSpec: - root, err := pathFor(manifestTagIndexEntryPathSpec{ - name: v.name, - tag: v.tag, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "link"), nil - case manifestTagIndexEntryPathSpec: - root, err := pathFor(manifestTagIndexPathSpec{ - name: v.name, - tag: v.tag, - }) - - if err != nil { - return "", err - } - - components, err := digestPathComponents(v.revision, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(components...)), nil - case layerLinkPathSpec: - components, err := digestPathComponents(v.digest, false) - if err != nil { - return "", err - } - - // TODO(stevvooe): Right now, all blobs are linked under "_layers". If - // we have future migrations, we may want to rename this to "_blobs". - // A migration strategy would simply leave existing items in place and - // write the new paths, commit a file then delete the old files. - - blobLinkPathComponents := append(repoPrefix, v.name, "_layers") - - return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil - case blobsPathSpec: - blobsPathPrefix := append(rootPrefix, "blobs") - return path.Join(blobsPathPrefix...), nil - case blobPathSpec: - components, err := digestPathComponents(v.digest, true) - if err != nil { - return "", err - } - - blobPathPrefix := append(rootPrefix, "blobs") - return path.Join(append(blobPathPrefix, components...)...), nil - case blobDataPathSpec: - components, err := digestPathComponents(v.digest, true) - if err != nil { - return "", err - } - - components = append(components, "data") - blobPathPrefix := append(rootPrefix, "blobs") - return path.Join(append(blobPathPrefix, components...)...), nil - - case uploadDataPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil - case uploadStartedAtPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil - case uploadHashStatePathSpec: - offset := fmt.Sprintf("%d", v.offset) - if v.list { - offset = "" // Limit to the prefix for listing offsets. 
- } - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil - case repositoriesRootPathSpec: - return path.Join(repoPrefix...), nil - default: - // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). - return "", fmt.Errorf("unknown path spec: %#v", v) - } -} - -// pathSpec is a type to mark structs as path specs. There is no -// implementation because we'd like to keep the specs and the mappers -// decoupled. -type pathSpec interface { - pathSpec() -} - -// manifestRevisionsPathSpec describes the directory path for -// a manifest revision. -type manifestRevisionsPathSpec struct { - name string -} - -func (manifestRevisionsPathSpec) pathSpec() {} - -// manifestRevisionPathSpec describes the components of the directory path for -// a manifest revision. -type manifestRevisionPathSpec struct { - name string - revision digest.Digest -} - -func (manifestRevisionPathSpec) pathSpec() {} - -// manifestRevisionLinkPathSpec describes the path components required to look -// up the data link for a revision of a manifest. If this file is not present, -// the manifest blob is not available in the given repo. The contents of this -// file should just be the digest. -type manifestRevisionLinkPathSpec struct { - name string - revision digest.Digest -} - -func (manifestRevisionLinkPathSpec) pathSpec() {} - -// manifestTagsPathSpec describes the path elements required to point to the -// manifest tags directory. -type manifestTagsPathSpec struct { - name string -} - -func (manifestTagsPathSpec) pathSpec() {} - -// manifestTagPathSpec describes the path elements required to point to the -// manifest tag links files under a repository. These contain a blob id that -// can be used to look up the data and signatures. -type manifestTagPathSpec struct { - name string - tag string -} - -func (manifestTagPathSpec) pathSpec() {} - -// manifestTagCurrentPathSpec describes the link to the current revision for a -// given tag. -type manifestTagCurrentPathSpec struct { - name string - tag string -} - -func (manifestTagCurrentPathSpec) pathSpec() {} - -// manifestTagCurrentPathSpec describes the link to the index of revisions -// with the given tag. -type manifestTagIndexPathSpec struct { - name string - tag string -} - -func (manifestTagIndexPathSpec) pathSpec() {} - -// manifestTagIndexEntryPathSpec contains the entries of the index by revision. -type manifestTagIndexEntryPathSpec struct { - name string - tag string - revision digest.Digest -} - -func (manifestTagIndexEntryPathSpec) pathSpec() {} - -// manifestTagIndexEntryLinkPathSpec describes the link to a revisions of a -// manifest with given tag within the index. -type manifestTagIndexEntryLinkPathSpec struct { - name string - tag string - revision digest.Digest -} - -func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} - -// blobLinkPathSpec specifies a path for a blob link, which is a file with a -// blob id. The blob link will contain a content addressable blob id reference -// into the blob store. The format of the contents is as follows: -// -// : -// -// The following example of the file contents is more illustrative: -// -// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 -// -// This indicates that there is a blob with the id/digest, calculated via -// sha256 that can be fetched from the blob store. 
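For concreteness, the `layerLinkPathSpec` mapping handled by `pathFor` above (root prefix, repository name, `_layers`, then the digest components and a `link` file) comes out like this. A flattened sketch that skips the pathSpec indirection, reusing the sample digest from the comment; the repository name is made up:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// layerLinkPath flattens the layerLinkPathSpec mapping: the algorithm
// directory, the full hex, then a "link" file under _layers.
func layerLinkPath(name, dgst string) string {
	algo, hex, _ := strings.Cut(dgst, ":")
	return path.Join("/docker/registry/v2", "repositories", name,
		"_layers", algo, hex, "link")
}

func main() {
	fmt.Println(layerLinkPath("library/ubuntu",
		"sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36"))
	// /docker/registry/v2/repositories/library/ubuntu/_layers/sha256/96443a84.../link
}
```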
-type layerLinkPathSpec struct { - name string - digest digest.Digest -} - -func (layerLinkPathSpec) pathSpec() {} - -// blobAlgorithmReplacer does some very simple path sanitization for user -// input. Paths should be "safe" before getting this far due to strict digest -// requirements but we can add further path conversion here, if needed. -var blobAlgorithmReplacer = strings.NewReplacer( - "+", "/", - ".", "/", - ";", "/", -) - -// blobsPathSpec contains the path for the blobs directory -type blobsPathSpec struct{} - -func (blobsPathSpec) pathSpec() {} - -// blobPathSpec contains the path for the registry global blob store. -type blobPathSpec struct { - digest digest.Digest -} - -func (blobPathSpec) pathSpec() {} - -// blobDataPathSpec contains the path for the registry global blob store. For -// now, this contains layer data, exclusively. -type blobDataPathSpec struct { - digest digest.Digest -} - -func (blobDataPathSpec) pathSpec() {} - -// uploadDataPathSpec defines the path parameters of the data file for -// uploads. -type uploadDataPathSpec struct { - name string - id string -} - -func (uploadDataPathSpec) pathSpec() {} - -// uploadDataPathSpec defines the path parameters for the file that stores the -// start time of an uploads. If it is missing, the upload is considered -// unknown. Admittedly, the presence of this file is an ugly hack to make sure -// we have a way to cleanup old or stalled uploads that doesn't rely on driver -// FileInfo behavior. If we come up with a more clever way to do this, we -// should remove this file immediately and rely on the startetAt field from -// the client to enforce time out policies. -type uploadStartedAtPathSpec struct { - name string - id string -} - -func (uploadStartedAtPathSpec) pathSpec() {} - -// uploadHashStatePathSpec defines the path parameters for the file that stores -// the hash function state of an upload at a specific byte offset. If `list` is -// set, then the path mapper will generate a list prefix for all hash state -// offsets for the upload identified by the name, id, and alg. -type uploadHashStatePathSpec struct { - name string - id string - alg digest.Algorithm - offset int64 - list bool -} - -func (uploadHashStatePathSpec) pathSpec() {} - -// repositoriesRootPathSpec returns the root of repositories -type repositoriesRootPathSpec struct { -} - -func (repositoriesRootPathSpec) pathSpec() {} - -// digestPathComponents provides a consistent path breakdown for a given -// digest. For a generic digest, it will be as follows: -// -// / -// -// If multilevel is true, the first two bytes of the digest will separate -// groups of digest folder. 
It will be as follows: -// -// // -// -func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { - if err := dgst.Validate(); err != nil { - return nil, err - } - - algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) - hex := dgst.Hex() - prefix := []string{algorithm} - - var suffix []string - - if multilevel { - suffix = append(suffix, hex[:2]) - } - - suffix = append(suffix, hex) - - return append(prefix, suffix...), nil -} - -// Reconstructs a digest from a path -func digestFromPath(digestPath string) (digest.Digest, error) { - - digestPath = strings.TrimSuffix(digestPath, "/data") - dir, hex := path.Split(digestPath) - dir = path.Dir(dir) - dir, next := path.Split(dir) - - // next is either the algorithm OR the first two characters in the hex string - var algo string - if next == hex[:2] { - algo = path.Base(dir) - } else { - algo = next - } - - dgst := digest.NewDigestFromHex(algo, hex) - return dgst, dgst.Validate() -} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go deleted file mode 100644 index d6cae4e7b..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go +++ /dev/null @@ -1,139 +0,0 @@ -package storage - -import ( - "path" - "strings" - "time" - - "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/uuid" - log "github.com/sirupsen/logrus" -) - -// uploadData stored the location of temporary files created during a layer upload -// along with the date the upload was started -type uploadData struct { - containingDir string - startedAt time.Time -} - -func newUploadData() uploadData { - return uploadData{ - containingDir: "", - // default to far in future to protect against missing startedat - startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), - } -} - -// PurgeUploads deletes files from the upload directory -// created before olderThan. The list of files deleted and errors -// encountered are returned -func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { - log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) - uploadData, errors := getOutstandingUploads(ctx, driver) - var deleted []string - for _, uploadData := range uploadData { - if uploadData.startedAt.Before(olderThan) { - var err error - log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", - uploadData.containingDir, uploadData.startedAt, olderThan) - if actuallyDelete { - err = driver.Delete(ctx, uploadData.containingDir) - } - if err == nil { - deleted = append(deleted, uploadData.containingDir) - } else { - errors = append(errors, err) - } - } - } - - log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) - return deleted, errors -} - -// getOutstandingUploads walks the upload directory, collecting files -// which could be eligible for deletion. The only reliable way to -// classify the age of a file is with the date stored in the startedAt -// file, so gather files by UUID with a date from startedAt. 
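`digestPathComponents` above drives both the blob-store and link paths; with `multilevel` set (as for the blob data paths) the first two hex characters become a fan-out directory. A minimal sketch of the same breakdown, using a plain string split in place of `digest.Digest`:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// digestComponents returns the algorithm directory, an optional two-character
// fan-out level, then the full hex of the digest.
func digestComponents(dgst string, multilevel bool) []string {
	algo, hex, _ := strings.Cut(dgst, ":")
	parts := []string{algo}
	if multilevel {
		parts = append(parts, hex[:2]) // first two bytes group the folders
	}
	return append(parts, hex)
}

func main() {
	d := "sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36"
	fmt.Println(path.Join(digestComponents(d, true)...))
	// sha256/96/96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
}
```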
-func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { - var errors []error - uploads := make(map[string]uploadData, 0) - - inUploadDir := false - root, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return uploads, append(errors, err) - } - - err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { - filePath := fileInfo.Path() - _, file := path.Split(filePath) - if file[0] == '_' { - // Reserved directory - inUploadDir = (file == "_uploads") - - if fileInfo.IsDir() && !inUploadDir { - return ErrSkipDir - } - - } - - uuid, isContainingDir := uuidFromPath(filePath) - if uuid == "" { - // Cannot reliably delete - return nil - } - ud, ok := uploads[uuid] - if !ok { - ud = newUploadData() - } - if isContainingDir { - ud.containingDir = filePath - } - if file == "startedat" { - if t, err := readStartedAtFile(driver, filePath); err == nil { - ud.startedAt = t - } else { - errors = pushError(errors, filePath, err) - } - - } - - uploads[uuid] = ud - return nil - }) - - if err != nil { - errors = pushError(errors, root, err) - } - return uploads, errors -} - -// uuidFromPath extracts the upload UUID from a given path -// If the UUID is the last path component, this is the containing -// directory for all upload files -func uuidFromPath(path string) (string, bool) { - components := strings.Split(path, "/") - for i := len(components) - 1; i >= 0; i-- { - if u, err := uuid.Parse(components[i]); err == nil { - return u.String(), i == len(components)-1 - } - } - return "", false -} - -// readStartedAtFile reads the date from an upload's startedAtFile -func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { - // todo:(richardscothern) - pass in a context - startedAtBytes, err := driver.GetContent(context.Background(), path) - if err != nil { - return time.Now(), err - } - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return time.Now(), err - } - return startedAt, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/registry.go b/vendor/github.com/docker/distribution/registry/storage/registry.go deleted file mode 100644 index 20525ffb3..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/registry.go +++ /dev/null @@ -1,306 +0,0 @@ -package storage - -import ( - "regexp" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libtrust" -) - -// registry is the top-level implementation of Registry for use in the storage -// package. All instances should descend from this object. -type registry struct { - blobStore *blobStore - blobServer *blobServer - statter *blobStatter // global statter service. - blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider - deleteEnabled bool - resumableDigestEnabled bool - schema1SigningKey libtrust.PrivateKey - blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory - manifestURLs manifestURLs -} - -// manifestURLs holds regular expressions for controlling manifest URL whitelisting -type manifestURLs struct { - allow *regexp.Regexp - deny *regexp.Regexp -} - -// RegistryOption is the type used for functional options for NewRegistry. 
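The `uuidFromPath` helper above classifies upload files by scanning path components right to left for an upload UUID; whether the UUID is the final component tells the caller it found the containing directory itself. A sketch with a regexp standing in for `uuid.Parse` (sample path and UUID are made up):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// uuidRe is a simple stand-in for the uuid.Parse check used above.
var uuidRe = regexp.MustCompile(
	`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`)

// uuidFromPath walks the components right to left, returning the first UUID
// found and whether it is the final component (the containing directory).
func uuidFromPath(p string) (string, bool) {
	components := strings.Split(p, "/")
	for i := len(components) - 1; i >= 0; i-- {
		if uuidRe.MatchString(components[i]) {
			return components[i], i == len(components)-1
		}
	}
	return "", false
}

func main() {
	p := "v2/repositories/foo/_uploads/0d8c4ef0-8d2a-4e7a-9d4a-5a3f1c2b6e7d/startedat"
	id, isContainingDir := uuidFromPath(p)
	fmt.Println(id, isContainingDir) // 0d8c4ef0-8d2a-4e7a-9d4a-5a3f1c2b6e7d false
}
```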
-type RegistryOption func(*registry) error - -// EnableRedirect is a functional option for NewRegistry. It causes the backend -// blob server to attempt using (StorageDriver).URLFor to serve all blobs. -func EnableRedirect(registry *registry) error { - registry.blobServer.redirect = true - return nil -} - -// EnableDelete is a functional option for NewRegistry. It enables deletion on -// the registry. -func EnableDelete(registry *registry) error { - registry.deleteEnabled = true - return nil -} - -// DisableDigestResumption is a functional option for NewRegistry. It should be -// used if the registry is acting as a caching proxy. -func DisableDigestResumption(registry *registry) error { - registry.resumableDigestEnabled = false - return nil -} - -// ManifestURLsAllowRegexp is a functional option for NewRegistry. -func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption { - return func(registry *registry) error { - registry.manifestURLs.allow = r - return nil - } -} - -// ManifestURLsDenyRegexp is a functional option for NewRegistry. -func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption { - return func(registry *registry) error { - registry.manifestURLs.deny = r - return nil - } -} - -// Schema1SigningKey returns a functional option for NewRegistry. It sets the -// key for signing all schema1 manifests. -func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { - return func(registry *registry) error { - registry.schema1SigningKey = key - return nil - } -} - -// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the -// factory to create BlobDescriptorServiceFactory middleware. -func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption { - return func(registry *registry) error { - registry.blobDescriptorServiceFactory = factory - return nil - } -} - -// BlobDescriptorCacheProvider returns a functional option for -// NewRegistry. It creates a cached blob statter for use by the -// registry. -func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { - // TODO(aaronl): The duplication of statter across several objects is - // ugly, and prevents us from using interface types in the registry - // struct. Ideally, blobStore and blobServer should be lazily - // initialized, and use the current value of - // blobDescriptorCacheProvider. - return func(registry *registry) error { - if blobDescriptorCacheProvider != nil { - statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) - registry.blobStore.statter = statter - registry.blobServer.statter = statter - registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider - } - return nil - } -} - -// NewRegistry creates a new registry instance from the provided driver. The -// resulting registry may be shared by multiple goroutines but is cheap to -// allocate. If the Redirect option is specified, the backend blob server will -// attempt to use (StorageDriver).URLFor to serve all blobs. 
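`RegistryOption`, `EnableRedirect` and friends above are the classic Go functional-options pattern: `NewRegistry` applies each option to the registry it is constructing and aborts on the first error. A stripped-down sketch of the shape, with the field set reduced to two flags:

```go
package main

import "fmt"

// registry is a stand-in for the struct above; Option mirrors RegistryOption:
// a function that mutates the registry under construction or fails.
type registry struct {
	deleteEnabled bool
	redirect      bool
}

type Option func(*registry) error

func EnableDelete(r *registry) error { r.deleteEnabled = true; return nil }

func EnableRedirect(r *registry) error { r.redirect = true; return nil }

// NewRegistry applies each option in order, aborting on the first error.
func NewRegistry(options ...Option) (*registry, error) {
	r := &registry{}
	for _, opt := range options {
		if err := opt(r); err != nil {
			return nil, err
		}
	}
	return r, nil
}

func main() {
	r, _ := NewRegistry(EnableDelete, EnableRedirect)
	fmt.Printf("%+v\n", r) // &{deleteEnabled:true redirect:true}
}
```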
-func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { - // create global statter - statter := &blobStatter{ - driver: driver, - } - - bs := &blobStore{ - driver: driver, - statter: statter, - } - - registry := ®istry{ - blobStore: bs, - blobServer: &blobServer{ - driver: driver, - statter: statter, - pathFn: bs.path, - }, - statter: statter, - resumableDigestEnabled: true, - } - - for _, option := range options { - if err := option(registry); err != nil { - return nil, err - } - } - - return registry, nil -} - -// Scope returns the namespace scope for a registry. The registry -// will only serve repositories contained within this scope. -func (reg *registry) Scope() distribution.Scope { - return distribution.GlobalScope -} - -// Repository returns an instance of the repository tied to the registry. -// Instances should not be shared between goroutines but are cheap to -// allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { - var descriptorCache distribution.BlobDescriptorService - if reg.blobDescriptorCacheProvider != nil { - var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) - if err != nil { - return nil, err - } - } - - return &repository{ - ctx: ctx, - registry: reg, - name: canonicalName, - descriptorCache: descriptorCache, - }, nil -} - -func (reg *registry) Blobs() distribution.BlobEnumerator { - return reg.blobStore -} - -func (reg *registry) BlobStatter() distribution.BlobStatter { - return reg.statter -} - -// repository provides name-scoped access to various services. -type repository struct { - *registry - ctx context.Context - name reference.Named - descriptorCache distribution.BlobDescriptorService -} - -// Name returns the name of the repository. -func (repo *repository) Named() reference.Named { - return repo.name -} - -func (repo *repository) Tags(ctx context.Context) distribution.TagService { - tags := &tagStore{ - repository: repo, - blobStore: repo.registry.blobStore, - } - - return tags -} - -// Manifests returns an instance of ManifestService. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. -func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - manifestLinkPathFns := []linkPathFunc{ - // NOTE(stevvooe): Need to search through multiple locations since - // 2.1.0 unintentionally linked into _layers. - manifestRevisionLinkPath, - blobLinkPath, - } - - manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} - - var statter distribution.BlobDescriptorService = &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - } - - if repo.registry.blobDescriptorServiceFactory != nil { - statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) - } - - blobStore := &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: statter, - - // TODO(stevvooe): linkPath limits this blob store to only - // manifests. This instance cannot be used for blob checks. 
- linkPathFns: manifestLinkPathFns, - linkDirectoryPathSpec: manifestDirectoryPathSpec, - } - - ms := &manifestStore{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - schema1Handler: &signedManifestHandler{ - ctx: ctx, - schema1SigningKey: repo.schema1SigningKey, - repository: repo, - blobStore: blobStore, - }, - schema2Handler: &schema2ManifestHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - manifestURLs: repo.registry.manifestURLs, - }, - manifestListHandler: &manifestListHandler{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - }, - } - - // Apply options - for _, option := range options { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - - return ms, nil -} - -// Blobs returns an instance of the BlobStore. Instantiation is cheap and -// may be context sensitive in the future. The instance should be used similar -// to a request local. -func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { - var statter distribution.BlobDescriptorService = &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: []linkPathFunc{blobLinkPath}, - } - - if repo.descriptorCache != nil { - statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) - } - - if repo.registry.blobDescriptorServiceFactory != nil { - statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) - } - - return &linkedBlobStore{ - registry: repo.registry, - blobStore: repo.blobStore, - blobServer: repo.blobServer, - blobAccessController: statter, - repository: repo, - ctx: ctx, - - // TODO(stevvooe): linkPath limits this blob store to only layers. - // This instance cannot be used for manifest checks. - linkPathFns: []linkPathFunc{blobLinkPath}, - deleteEnabled: repo.registry.deleteEnabled, - resumableDigestEnabled: repo.resumableDigestEnabled, - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go deleted file mode 100644 index 05c53254f..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go +++ /dev/null @@ -1,136 +0,0 @@ -package storage - -import ( - "encoding/json" - "errors" - "fmt" - "net/url" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/opencontainers/go-digest" -) - -var ( - errUnexpectedURL = errors.New("unexpected URL on layer") - errMissingURL = errors.New("missing URL on layer") - errInvalidURL = errors.New("invalid URL on layer") -) - -//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. 
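The `errMissingURL`/`errInvalidURL` sentinels above feed the foreign-layer checks in the handler's `verifyManifest` below: a layer served from an external URL must be http(s), fragment-free, and pass the configured allow/deny regexps. A standalone sketch of that predicate (hostnames illustrative):

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// validLayerURL mirrors the foreign-layer URL checks: http(s) scheme only,
// no fragment, and optional allow/deny regexps from the registry config.
func validLayerURL(u string, allow, deny *regexp.Regexp) bool {
	pu, err := url.Parse(u)
	if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" {
		return false
	}
	if allow != nil && !allow.MatchString(u) {
		return false
	}
	if deny != nil && deny.MatchString(u) {
		return false
	}
	return true
}

func main() {
	allow := regexp.MustCompile(`^https://layers\.example\.com/`)
	fmt.Println(validLayerURL("https://layers.example.com/a.tar.gz", allow, nil)) // true
	fmt.Println(validLayerURL("ftp://layers.example.com/a.tar.gz", allow, nil))   // false
}
```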
-type schema2ManifestHandler struct { - repository distribution.Repository - blobStore distribution.BlobStore - ctx context.Context - manifestURLs manifestURLs -} - -var _ ManifestHandler = &schema2ManifestHandler{} - -func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") - - var m schema2.DeserializedManifest - if err := json.Unmarshal(content, &m); err != nil { - return nil, err - } - - return &m, nil -} - -func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") - - m, ok := manifest.(*schema2.DeserializedManifest) - if !ok { - return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) - } - - if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { - return "", err - } - - mt, payload, err := m.Payload() - if err != nil { - return "", err - } - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. As a policy, the registry only tries to store -// valid content, leaving trust policies of that content up to consumers. -func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if skipDependencyVerification { - return nil - } - - manifestService, err := ms.repository.Manifests(ctx) - if err != nil { - return err - } - - blobsService := ms.repository.Blobs(ctx) - - for _, descriptor := range mnfst.References() { - var err error - - switch descriptor.MediaType { - case schema2.MediaTypeForeignLayer: - // Clients download this layer from an external URL, so do not check for - // its presense. - if len(descriptor.URLs) == 0 { - err = errMissingURL - } - allow := ms.manifestURLs.allow - deny := ms.manifestURLs.deny - for _, u := range descriptor.URLs { - var pu *url.URL - pu, err = url.Parse(u) - if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { - err = errInvalidURL - break - } - } - case schema2.MediaTypeManifest, schema1.MediaTypeManifest: - var exists bool - exists, err = manifestService.Exists(ctx, descriptor.Digest) - if err != nil || !exists { - err = distribution.ErrBlobUnknown // just coerce to unknown. - } - - fallthrough // double check the blob store. - default: - // forward all else to blob storage - if len(descriptor.URLs) == 0 { - _, err = blobsService.Stat(ctx, descriptor.Digest) - } - } - - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. 
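The foreign-layer branch of `verifyManifest` above validates each descriptor URL: it must parse, use http or https, carry no fragment, and pass the optional allow/deny regexps. A standalone sketch of equivalent checks (the allow pattern in `main` is made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"net/url"
	"regexp"
)

var errInvalidLayerURL = errors.New("invalid URL on layer")

// checkLayerURL mirrors the per-URL validation in verifyManifest:
// parseable, http(s) only, no fragment, and allow/deny filters.
func checkLayerURL(raw string, allow, deny *regexp.Regexp) error {
	u, err := url.Parse(raw)
	if err != nil {
		return errInvalidLayerURL
	}
	if u.Scheme != "http" && u.Scheme != "https" {
		return errInvalidLayerURL
	}
	if u.Fragment != "" {
		return errInvalidLayerURL
	}
	if allow != nil && !allow.MatchString(raw) {
		return errInvalidLayerURL
	}
	if deny != nil && deny.MatchString(raw) {
		return errInvalidLayerURL
	}
	return nil
}

func main() {
	allow := regexp.MustCompile(`^https://layers\.example\.com/`) // hypothetical policy
	fmt.Println(checkLayerURL("https://layers.example.com/blob", allow, nil)) // <nil>
	fmt.Println(checkLayerURL("ftp://elsewhere/blob", allow, nil))            // invalid URL on layer
}
```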
- errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest}) - } - } - - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go deleted file mode 100644 index 6ca1c6c8c..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go +++ /dev/null @@ -1,141 +0,0 @@ -package storage - -import ( - "encoding/json" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" -) - -// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It -// can unmarshal and put schema1 manifests that have been signed by libtrust. -type signedManifestHandler struct { - repository distribution.Repository - schema1SigningKey libtrust.PrivateKey - blobStore distribution.BlobStore - ctx context.Context -} - -var _ ManifestHandler = &signedManifestHandler{} - -func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") - - var ( - signatures [][]byte - err error - ) - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - if ms.schema1SigningKey != nil { - if err := jsig.Sign(ms.schema1SigningKey); err != nil { - return nil, err - } - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - return &sm, nil -} - -func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") - - sm, ok := manifest.(*schema1.SignedManifest) - if !ok { - return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) - } - - if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { - return "", err - } - - mt := schema1.MediaTypeManifest - payload := sm.Canonical - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - return revision.Digest, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. 
-func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { - var errs distribution.ErrManifestVerification - - if len(mnfst.Name) > reference.NameTotalLengthMax { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), - }) - } - - if !reference.NameRegexp.MatchString(mnfst.Name) { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("invalid manifest name format"), - }) - } - - if len(mnfst.History) != len(mnfst.FSLayers) { - errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", - len(mnfst.History), len(mnfst.FSLayers))) - } - - if _, err := schema1.Verify(&mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, distribution.ErrManifestUnverified{}) - default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } - } - } - - if !skipDependencyVerification { - for _, fsLayer := range mnfst.References() { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go deleted file mode 100644 index d73278869..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/tagstore.go +++ /dev/null @@ -1,191 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" -) - -var _ distribution.TagService = &tagStore{} - -// tagStore provides methods to manage manifest tags in a backend storage driver. -// This implementation uses the same on-disk layout as the (now deleted) tag -// store. This provides backward compatibility with current registry deployments -// which only makes use of the Digest field of the returned distribution.Descriptor -// but does not enable full roundtripping of Descriptor objects -type tagStore struct { - repository *repository - blobStore *blobStore -} - -// All returns all tags -func (ts *tagStore) All(ctx context.Context) ([]string, error) { - var tags []string - - pathSpec, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Named().Name(), - }) - if err != nil { - return tags, err - } - - entries, err := ts.blobStore.driver.List(ctx, pathSpec) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} - default: - return tags, err - } - } - - for _, entry := range entries { - _, filename := path.Split(entry) - tags = append(tags, filename) - } - - return tags, nil -} - -// exists returns true if the specified manifest tag exists in the repository. 
-func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { - tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Named().Name(), - tag: tag, - }) - - if err != nil { - return false, err - } - - exists, err := exists(ctx, ts.blobStore.driver, tagPath) - if err != nil { - return false, err - } - - return exists, nil -} - -// Tag tags the digest with the given tag, updating the the store to point at -// the current tag. The digest must point to a manifest. -func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Named().Name(), - tag: tag, - }) - - if err != nil { - return err - } - - lbs := ts.linkedBlobStore(ctx, tag) - - // Link into the index - if err := lbs.linkBlob(ctx, desc); err != nil { - return err - } - - // Overwrite the current link - return ts.blobStore.link(ctx, currentPath, desc.Digest) -} - -// resolve the current revision for name and tag. -func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Named().Name(), - tag: tag, - }) - - if err != nil { - return distribution.Descriptor{}, err - } - - revision, err := ts.blobStore.readlink(ctx, currentPath) - if err != nil { - switch err.(type) { - case storagedriver.PathNotFoundError: - return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} - } - - return distribution.Descriptor{}, err - } - - return distribution.Descriptor{Digest: revision}, nil -} - -// Untag removes the tag association -func (ts *tagStore) Untag(ctx context.Context, tag string) error { - tagPath, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Named().Name(), - tag: tag, - }) - - switch err.(type) { - case storagedriver.PathNotFoundError: - return distribution.ErrTagUnknown{Tag: tag} - case nil: - break - default: - return err - } - - return ts.blobStore.driver.Delete(ctx, tagPath) -} - -// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one -// to index manifest blobs by tag name. While the tag store doesn't map -// precisely to the linked blob store, using this ensures the links are -// managed via the same code path. -func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { - return &linkedBlobStore{ - blobStore: ts.blobStore, - repository: ts.repository, - ctx: ctx, - linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestTagIndexEntryLinkPathSpec{ - name: name, - tag: tag, - revision: dgst, - }) - - }}, - } -} - -// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by -// digest, tag entries which point to it need to be recovered to avoid dangling tags. 
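The `tagStore` above resolves each tag through a "current" link under a per-tag path, overwriting that link on `Tag` and deleting the tag directory on `Untag`. A toy file-backed analogue, assuming a local directory layout of `<root>/<tag>/current/link` (a simplification of the real path specs):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// fileTagStore is a toy analogue of tagStore: each tag is a small file
// whose content is the manifest digest it currently points at.
type fileTagStore struct{ root string }

func (ts fileTagStore) tagPath(tag string) string {
	return filepath.Join(ts.root, tag, "current", "link")
}

func (ts fileTagStore) Tag(tag, digest string) error {
	p := ts.tagPath(tag)
	if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil {
		return err
	}
	return os.WriteFile(p, []byte(digest), 0o644) // overwrite the current link
}

func (ts fileTagStore) Get(tag string) (string, error) {
	b, err := os.ReadFile(ts.tagPath(tag))
	if os.IsNotExist(err) {
		return "", fmt.Errorf("unknown tag: %s", tag)
	}
	return string(b), err
}

func (ts fileTagStore) Untag(tag string) error {
	return os.RemoveAll(filepath.Join(ts.root, tag))
}

func main() {
	dir, _ := os.MkdirTemp("", "tags")
	defer os.RemoveAll(dir)

	ts := fileTagStore{root: dir}
	_ = ts.Tag("latest", "sha256:abc123")
	fmt.Println(ts.Get("latest")) // sha256:abc123 <nil>
	_ = ts.Untag("latest")
	fmt.Println(ts.Get("latest")) // unknown tag: latest
}
```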
-func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { - allTags, err := ts.All(ctx) - switch err.(type) { - case distribution.ErrRepositoryUnknown: - // This tag store has been initialized but not yet populated - break - case nil: - break - default: - return nil, err - } - - var tags []string - for _, tag := range allTags { - tagLinkPathSpec := manifestTagCurrentPathSpec{ - name: ts.repository.Named().Name(), - tag: tag, - } - - tagLinkPath, err := pathFor(tagLinkPathSpec) - tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) - if err != nil { - return nil, err - } - - if tagDigest == desc.Digest { - tags = append(tags, tag) - } - } - - return tags, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/util.go b/vendor/github.com/docker/distribution/registry/storage/util.go deleted file mode 100644 index 773d7ba0b..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/util.go +++ /dev/null @@ -1,21 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" -) - -// Exists provides a utility method to test whether or not a path exists in -// the given driver. -func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { - if _, err := drv.Stat(ctx, path); err != nil { - switch err := err.(type) { - case driver.PathNotFoundError: - return false, nil - default: - return false, err - } - } - - return true, nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/vacuum.go b/vendor/github.com/docker/distribution/registry/storage/vacuum.go deleted file mode 100644 index 42c8ef605..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/vacuum.go +++ /dev/null @@ -1,67 +0,0 @@ -package storage - -import ( - "path" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/storage/driver" - "github.com/opencontainers/go-digest" -) - -// vacuum contains functions for cleaning up repositories and blobs -// These functions will only reliably work on strongly consistent -// storage systems. 
-// https://en.wikipedia.org/wiki/Consistency_model - -// NewVacuum creates a new Vacuum -func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { - return Vacuum{ - ctx: ctx, - driver: driver, - } -} - -// Vacuum removes content from the filesystem -type Vacuum struct { - driver driver.StorageDriver - ctx context.Context -} - -// RemoveBlob removes a blob from the filesystem -func (v Vacuum) RemoveBlob(dgst string) error { - d, err := digest.Parse(dgst) - if err != nil { - return err - } - - blobPath, err := pathFor(blobPathSpec{digest: d}) - if err != nil { - return err - } - - context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) - - err = v.driver.Delete(v.ctx, blobPath) - if err != nil { - return err - } - - return nil -} - -// RemoveRepository removes a repository directory from the -// filesystem -func (v Vacuum) RemoveRepository(repoName string) error { - rootForRepository, err := pathFor(repositoriesRootPathSpec{}) - if err != nil { - return err - } - repoDir := path.Join(rootForRepository, repoName) - context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) - err = v.driver.Delete(v.ctx, repoDir) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk.go b/vendor/github.com/docker/distribution/registry/storage/walk.go deleted file mode 100644 index d979796eb..000000000 --- a/vendor/github.com/docker/distribution/registry/storage/walk.go +++ /dev/null @@ -1,59 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "sort" - - "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" -) - -// ErrSkipDir is used as a return value from onFileFunc to indicate that -// the directory named in the call is to be skipped. It is not returned -// as an error by any function. -var ErrSkipDir = errors.New("skip this directory") - -// WalkFn is called once per file by Walk -// If the returned error is ErrSkipDir and fileInfo refers -// to a directory, the directory will not be entered and Walk -// will continue the traversal. Otherwise Walk will return -type WalkFn func(fileInfo storageDriver.FileInfo) error - -// Walk traverses a filesystem defined within driver, starting -// from the given path, calling f on each file -func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { - children, err := driver.List(ctx, from) - if err != nil { - return err - } - sort.Stable(sort.StringSlice(children)) - for _, child := range children { - // TODO(stevvooe): Calling driver.Stat for every entry is quite - // expensive when running against backends with a slow Stat - // implementation, such as s3. This is very likely a serious - // performance bottleneck. 
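The `Walk` helper above uses a sentinel error, `ErrSkipDir`, to let the callback prune directories without aborting the traversal; the standard library exposes the same convention as `fs.SkipDir`. A stdlib equivalent over a local filesystem:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// Walk the current directory, pruning any vendor/ subtree the same
	// way a WalkFn returning ErrSkipDir prunes traversal above.
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() && d.Name() == "vendor" {
			return fs.SkipDir // sentinel: skip this directory, keep walking
		}
		fmt.Println(path)
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
```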
- fileInfo, err := driver.Stat(ctx, child) - if err != nil { - return err - } - err = f(fileInfo) - skipDir := (err == ErrSkipDir) - if err != nil && !skipDir { - return err - } - - if fileInfo.IsDir() && !skipDir { - if err := Walk(ctx, driver, child, f); err != nil { - return err - } - } - } - return nil -} - -// pushError formats an error type given a path and an error -// and pushes it to a slice of errors -func pushError(errors []error, path string, err error) []error { - return append(errors, fmt.Errorf("%s: %s", path, err)) -} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go deleted file mode 100644 index 5a3e2cd8c..000000000 --- a/vendor/github.com/docker/docker/builder/builder.go +++ /dev/null @@ -1,107 +0,0 @@ -// Package builder defines interfaces for any Docker builder to implement. -// -// Historically, only server-side Dockerfile interpreters existed. -// This package allows for other implementations of Docker builders. -package builder - -import ( - "io" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/containerfs" - "golang.org/x/net/context" -) - -const ( - // DefaultDockerfileName is the Default filename with Docker commands, read by docker build - DefaultDockerfileName string = "Dockerfile" -) - -// Source defines a location that can be used as a source for the ADD/COPY -// instructions in the builder. -type Source interface { - // Root returns root path for accessing source - Root() containerfs.ContainerFS - // Close allows to signal that the filesystem tree won't be used anymore. - // For Context implementations using a temporary directory, it is recommended to - // delete the temporary directory in Close(). - Close() error - // Hash returns a checksum for a file - Hash(path string) (string, error) -} - -// Backend abstracts calls to a Docker Daemon. -type Backend interface { - ImageBackend - ExecBackend - - // Commit creates a new Docker image from an existing Docker container. - Commit(string, *backend.ContainerCommitConfig) (string, error) - // ContainerCreateWorkdir creates the workdir - ContainerCreateWorkdir(containerID string) error - - CreateImage(config []byte, parent string, platform string) (Image, error) - - ImageCacheBuilder -} - -// ImageBackend are the interface methods required from an image component -type ImageBackend interface { - GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error) -} - -// ExecBackend contains the interface methods required for executing containers -type ExecBackend interface { - // ContainerAttachRaw attaches to container. - ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error - // ContainerCreate creates a new Docker container and returns potential warnings - ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) - // ContainerRm removes a container specified by `id`. - ContainerRm(name string, config *types.ContainerRmConfig) error - // ContainerKill stops the container execution abruptly. 
- ContainerKill(containerID string, sig uint64) error - // ContainerStart starts a new container - ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error - // ContainerWait stops processing until the given container is stopped. - ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) -} - -// Result is the output produced by a Builder -type Result struct { - ImageID string - FromImage Image -} - -// ImageCacheBuilder represents a generator for stateful image cache. -type ImageCacheBuilder interface { - // MakeImageCache creates a stateful image cache. - MakeImageCache(cacheFrom []string, platform string) ImageCache -} - -// ImageCache abstracts an image cache. -// (parent image, child runconfig) -> child image -type ImageCache interface { - // GetCache returns a reference to a cached image whose parent equals `parent` - // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. - GetCache(parentID string, cfg *container.Config) (imageID string, err error) -} - -// Image represents a Docker image used by the builder. -type Image interface { - ImageID() string - RunConfig() *container.Config - MarshalJSON() ([]byte, error) - OperatingSystem() string -} - -// ReleaseableLayer is an image layer that can be mounted and released -type ReleaseableLayer interface { - Release() error - Mount() (containerfs.ContainerFS, error) - Commit(platform string) (ReleaseableLayer, error) - DiffID() layer.DiffID -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/archive.go b/vendor/github.com/docker/docker/builder/remotecontext/archive.go deleted file mode 100644 index fc18c5da3..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/archive.go +++ /dev/null @@ -1,129 +0,0 @@ -package remotecontext - -import ( - "io" - "os" - "path/filepath" - - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/tarsum" - "github.com/pkg/errors" -) - -type archiveContext struct { - root containerfs.ContainerFS - sums tarsum.FileInfoSums -} - -func (c *archiveContext) Close() error { - return c.root.RemoveAll(c.root.Path()) -} - -func convertPathError(err error, cleanpath string) error { - if err, ok := err.(*os.PathError); ok { - err.Path = cleanpath - return err - } - return err -} - -type modifiableContext interface { - builder.Source - // Remove deletes the entry specified by `path`. - // It is usual for directory entries to delete all its subentries. - Remove(path string) error -} - -// FromArchive returns a build source from a tar stream. -// -// It extracts the tar stream to a temporary folder that is deleted as soon as -// the Context is closed. -// As the extraction happens, a tarsum is calculated for every file, and the set of -// all those sums then becomes the source of truth for all operations on this Context. -// -// Closing tarStream has to be done by the caller. -func FromArchive(tarStream io.Reader) (builder.Source, error) { - root, err := ioutils.TempDir("", "docker-builder") - if err != nil { - return nil, err - } - - // Assume local file system. Since it's coming from a tar file. - tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)} - - // Make sure we clean-up upon error. 
In the happy case the caller - // is expected to manage the clean-up - defer func() { - if err != nil { - tsc.Close() - } - }() - - decompressedStream, err := archive.DecompressStream(tarStream) - if err != nil { - return nil, err - } - - sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) - if err != nil { - return nil, err - } - - err = chrootarchive.Untar(sum, root, nil) - if err != nil { - return nil, err - } - - tsc.sums = sum.GetSums() - - return tsc, nil -} - -func (c *archiveContext) Root() containerfs.ContainerFS { - return c.root -} - -func (c *archiveContext) Remove(path string) error { - _, fullpath, err := normalize(path, c.root) - if err != nil { - return err - } - return c.root.RemoveAll(fullpath) -} - -func (c *archiveContext) Hash(path string) (string, error) { - cleanpath, fullpath, err := normalize(path, c.root) - if err != nil { - return "", err - } - - rel, err := c.root.Rel(c.root.Path(), fullpath) - if err != nil { - return "", convertPathError(err, cleanpath) - } - - // Use the checksum of the followed path(not the possible symlink) because - // this is the file that is actually copied. - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - return tsInfo.Sum(), nil - } - // We set sum to path by default for the case where GetFile returns nil. - // The usual case is if relative path is empty. - return path, nil // backwards compat TODO: see if really needed -} - -func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) { - cleanPath = root.Clean(string(root.Separator()) + path)[1:] - fullPath, err = root.ResolveScopedPath(path, true) - if err != nil { - return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) - } - if _, err := root.Lstat(fullPath); err != nil { - return "", "", errors.WithStack(convertPathError(err, path)) - } - return -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/detect.go b/vendor/github.com/docker/docker/builder/remotecontext/detect.go deleted file mode 100644 index 38aff6798..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/detect.go +++ /dev/null @@ -1,183 +0,0 @@ -package remotecontext - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" - - "github.com/containerd/continuity/driver" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/urlutil" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ClientSessionRemote is identifier for client-session context transport -const ClientSessionRemote = "client-session" - -// Detect returns a context and dockerfile from remote location or local -// archive. progressReader is only used if remoteURL is actually a URL -// (not empty, and not a Git endpoint). 
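`FromArchive` above relies on a deferred closure that inspects the named error return and cleans up the temporary directory only on the failure path, leaving happy-path cleanup to the caller. A minimal sketch of that idiom (the `newWorkDir` helper and its `populate` callback are hypothetical):

```go
package main

import (
	"fmt"
	"os"
)

// newWorkDir shows the cleanup-on-error idiom used by FromArchive above:
// a deferred closure inspects the named return err and undoes partial
// work only when a later step failed.
func newWorkDir(populate func(dir string) error) (dir string, err error) {
	tmp, err := os.MkdirTemp("", "workdir")
	if err != nil {
		return "", err
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmp) // only runs on the failure path
		}
	}()

	if err = populate(tmp); err != nil {
		return "", err
	}
	return tmp, nil
}

func main() {
	dir, err := newWorkDir(func(string) error { return nil })
	fmt.Println(dir, err)
	if err == nil {
		os.RemoveAll(dir)
	}
}
```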
-func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { - remoteURL := config.Options.RemoteContext - dockerfilePath := config.Options.Dockerfile - - switch { - case remoteURL == "": - remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) - case remoteURL == ClientSessionRemote: - res, err := parser.Parse(config.Source) - if err != nil { - return nil, nil, err - } - return nil, res, nil - case urlutil.IsGitURL(remoteURL): - remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) - case urlutil.IsURL(remoteURL): - remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) - default: - err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) - } - return -} - -func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { - defer rc.Close() - c, err := FromArchive(rc) - if err != nil { - return nil, nil, err - } - - return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) -} - -func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { - df, err := openAt(c, dockerfilePath) - if err != nil { - if os.IsNotExist(err) { - if dockerfilePath == builder.DefaultDockerfileName { - lowercase := strings.ToLower(dockerfilePath) - if _, err := StatAt(c, lowercase); err == nil { - return withDockerfileFromContext(c, lowercase) - } - } - return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error - } - c.Close() - return nil, nil, err - } - - res, err := readAndParseDockerfile(dockerfilePath, df) - if err != nil { - return nil, nil, err - } - - df.Close() - - if err := removeDockerfile(c, dockerfilePath); err != nil { - c.Close() - return nil, nil, err - } - - return c, res, nil -} - -func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) { - c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource - if err != nil { - return nil, nil, err - } - return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) -} - -func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { - var dockerfile io.ReadCloser - dockerfileFoundErr := errors.New("found-dockerfile") - c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile = rc - return nil, dockerfileFoundErr - }, - // fallback handler (tar context) - "": func(rc io.ReadCloser) (io.ReadCloser, error) { - return progressReader(rc), nil - }, - }) - switch { - case err == dockerfileFoundErr: - res, err := parser.Parse(dockerfile) - return nil, res, err - case err != nil: - return nil, nil, err - } - return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) -} - -func removeDockerfile(c modifiableContext, filesToRemove ...string) error { - f, err := openAt(c, ".dockerignore") - // Note that a missing .dockerignore file isn't treated as an error - switch { - case os.IsNotExist(err): - return nil - case err != nil: - return err - } - excludes, err := dockerignore.ReadAll(f) - if err != nil { - f.Close() - return err - } - f.Close() - filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
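`newURLRemote` above uses a private sentinel error (`dockerfileFoundErr`) to make a content-type handler stop processing early on purpose; the caller recognizes that specific error and converts it back into a success. A self-contained sketch of the pattern with made-up names:

```go
package main

import (
	"errors"
	"fmt"
)

// errFound is a private sentinel, like dockerfileFoundErr above: the
// handler "fails" with it deliberately to stop processing early, and
// the caller turns that specific error back into a success.
var errFound = errors.New("found")

func firstMatch(items []string, handler func(string) error) (string, error) {
	for _, it := range items {
		if err := handler(it); err != nil {
			if errors.Is(err, errFound) {
				return it, nil // sentinel means: stop, we have our answer
			}
			return "", err // any other error is a real failure
		}
	}
	return "", errors.New("no match")
}

func main() {
	got, err := firstMatch([]string{"a.txt", "Dockerfile", "b.txt"}, func(name string) error {
		if name == "Dockerfile" {
			return errFound
		}
		return nil
	})
	fmt.Println(got, err) // Dockerfile <nil>
}
```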
- for _, fileToRemove := range filesToRemove { - if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { - if err := c.Remove(fileToRemove); err != nil { - logrus.Errorf("failed to remove %s: %v", fileToRemove, err) - } - } - } - return nil -} - -func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { - br := bufio.NewReader(rc) - if _, err := br.Peek(1); err != nil { - if err == io.EOF { - return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) - } - return nil, errors.Wrap(err, "unexpected error reading Dockerfile") - } - return parser.Parse(br) -} - -func openAt(remote builder.Source, path string) (driver.File, error) { - fullPath, err := FullPath(remote, path) - if err != nil { - return nil, err - } - return remote.Root().Open(fullPath) -} - -// StatAt is a helper for calling Stat on a path from a source -func StatAt(remote builder.Source, path string) (os.FileInfo, error) { - fullPath, err := FullPath(remote, path) - if err != nil { - return nil, err - } - return remote.Root().Stat(fullPath) -} - -// FullPath is a helper for getting a full path for a path from a source -func FullPath(remote builder.Source, path string) (string, error) { - fullPath, err := remote.Root().ResolveScopedPath(path, true) - if err != nil { - return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error - } - return fullPath, nil -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/errors.go b/vendor/github.com/docker/docker/builder/remotecontext/errors.go deleted file mode 100644 index 8ee33bc60..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/errors.go +++ /dev/null @@ -1,75 +0,0 @@ -package remotecontext - -type notFoundError string - -func (e notFoundError) Error() string { - return string(e) -} - -func (notFoundError) NotFound() {} - -type requestError string - -func (e requestError) Error() string { - return string(e) -} - -func (e requestError) InvalidParameter() {} - -type unauthorizedError string - -func (e unauthorizedError) Error() string { - return string(e) -} - -func (unauthorizedError) Unauthorized() {} - -type forbiddenError string - -func (e forbiddenError) Error() string { - return string(e) -} - -func (forbiddenError) Forbidden() {} - -type dnsError struct { - cause error -} - -func (e dnsError) Error() string { - return e.cause.Error() -} - -func (e dnsError) NotFound() {} - -func (e dnsError) Cause() error { - return e.cause -} - -type systemError struct { - cause error -} - -func (e systemError) Error() string { - return e.cause.Error() -} - -func (e systemError) SystemError() {} - -func (e systemError) Cause() error { - return e.cause -} - -type unknownError struct { - cause error -} - -func (e unknownError) Error() string { - return e.cause.Error() -} - -func (unknownError) Unknown() {} - -func (e unknownError) Cause() error { - return e.cause -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/filehash.go b/vendor/github.com/docker/docker/builder/remotecontext/filehash.go deleted file mode 100644 index 417230297..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/filehash.go +++ /dev/null @@ -1,45 +0,0 @@ -package remotecontext - -import ( - "archive/tar" - "crypto/sha256" - "hash" - "os" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/tarsum" -) - -// NewFileHash returns new hash that is used for the builder cache keys -func NewFileHash(path, name string, fi os.FileInfo) 
(hash.Hash, error) { - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return nil, err - } - } - hdr, err := archive.FileInfoHeader(name, fi, link) - if err != nil { - return nil, err - } - if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return nil, err - } - tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} - tsh.Reset() // initialize header - return tsh, nil -} - -type tarsumHash struct { - hash.Hash - hdr *tar.Header -} - -// Reset resets the Hash to its initial state. -func (tsh *tarsumHash) Reset() { - // comply with hash.Hash and reset to the state hash had before any writes - tsh.Hash.Reset() - tarsum.WriteV1Header(tsh.hdr, tsh.Hash) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/generate.go b/vendor/github.com/docker/docker/builder/remotecontext/generate.go deleted file mode 100644 index 0b52d4992..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package remotecontext - -//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git.go b/vendor/github.com/docker/docker/builder/remotecontext/git.go deleted file mode 100644 index 158bb5ad4..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/git.go +++ /dev/null @@ -1,29 +0,0 @@ -package remotecontext - -import ( - "os" - - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/remotecontext/git" - "github.com/docker/docker/pkg/archive" -) - -// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. -func MakeGitContext(gitURL string) (builder.Source, error) { - root, err := git.Clone(gitURL) - if err != nil { - return nil, err - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return nil, err - } - - defer func() { - // TODO: print errors? - c.Close() - os.RemoveAll(root) - }() - return FromArchive(c) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go b/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go deleted file mode 100644 index 66f36defd..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go +++ /dev/null @@ -1,100 +0,0 @@ -package remotecontext - -import ( - "encoding/hex" - "os" - "strings" - - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/pools" - "github.com/pkg/errors" -) - -// NewLazySource creates a new LazyContext. LazyContext defines a hashed build -// context based on a root directory. Individual files are hashed first time -// they are asked. It is not safe to call methods of LazyContext concurrently. 
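`NewFileHash` above seeds a sha256 with a tar header so that file metadata, not just contents, participates in the builder cache key. A simplified stdlib-only cousin that folds a few metadata fields into the digest by hand (the real builder uses a full tar header via tarsum; `hashFile` is illustrative):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// hashFile seeds a sha256 with some metadata (name, mode, size) before
// streaming in the file contents, so metadata changes also change the
// resulting digest.
func hashFile(path string) (string, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return "", err
	}
	h := sha256.New()
	fmt.Fprintf(h, "%s|%o|%d|", fi.Name(), fi.Mode(), fi.Size())

	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := hashFile("main.go") // hashes this file itself, if present
	fmt.Println(sum, err)
}
```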
-func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) { - return &lazySource{ - root: root, - sums: make(map[string]string), - }, nil -} - -type lazySource struct { - root containerfs.ContainerFS - sums map[string]string -} - -func (c *lazySource) Root() containerfs.ContainerFS { - return c.root -} - -func (c *lazySource) Close() error { - return nil -} - -func (c *lazySource) Hash(path string) (string, error) { - cleanPath, fullPath, err := normalize(path, c.root) - if err != nil { - return "", err - } - - fi, err := c.root.Lstat(fullPath) - if err != nil { - return "", errors.WithStack(err) - } - - relPath, err := Rel(c.root, fullPath) - if err != nil { - return "", errors.WithStack(convertPathError(err, cleanPath)) - } - - sum, ok := c.sums[relPath] - if !ok { - sum, err = c.prepareHash(relPath, fi) - if err != nil { - return "", err - } - } - - return sum, nil -} - -func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { - p := c.root.Join(c.root.Path(), relPath) - h, err := NewFileHash(p, relPath, fi) - if err != nil { - return "", errors.Wrapf(err, "failed to create hash for %s", relPath) - } - if fi.Mode().IsRegular() && fi.Size() > 0 { - f, err := c.root.Open(p) - if err != nil { - return "", errors.Wrapf(err, "failed to open %s", relPath) - } - defer f.Close() - if _, err := pools.Copy(h, f); err != nil { - return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) - } - } - sum := hex.EncodeToString(h.Sum(nil)) - c.sums[relPath] = sum - return sum, nil -} - -// Rel makes a path relative to base path. Same as `filepath.Rel` but can also -// handle UUID paths in windows. -func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) { - // filepath.Rel can't handle UUID paths in windows - if basepath.OS() == "windows" { - pfx := basepath.Path() + `\` - if strings.HasPrefix(targpath, pfx) { - p := strings.TrimPrefix(targpath, pfx) - if p == "" { - p = "." - } - return p, nil - } - } - return basepath.Rel(basepath.Path(), targpath) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go deleted file mode 100644 index 083d60997..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go +++ /dev/null @@ -1,27 +0,0 @@ -package remotecontext - -import ( - "mime" - "net/http" -) - -// mimeTypes stores the MIME content type. -var mimeTypes = struct { - TextPlain string - OctetStream string -}{"text/plain", "application/octet-stream"} - -// detectContentType returns a best guess representation of the MIME -// content type for the bytes at c. The value detected by -// http.DetectContentType is guaranteed not be nil, defaulting to -// application/octet-stream when a better guess cannot be made. The -// result of this detection is then run through mime.ParseMediaType() -// which separates the actual MIME string from any parameters. 
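`lazySource` above memoizes per-file sums in a plain map and, as its doc comment warns, is deliberately not safe for concurrent use. The caching shape in isolation, with a hypothetical `memo` type:

```go
package main

import (
	"fmt"
	"strings"
)

// memo caches expensive per-key computations the way lazySource caches
// file sums: the first request computes and stores, later requests hit
// the map. Like lazySource, it is not safe for concurrent use.
type memo struct {
	compute func(string) string
	cache   map[string]string
}

func (m *memo) get(key string) string {
	if v, ok := m.cache[key]; ok {
		return v
	}
	v := m.compute(key)
	m.cache[key] = v
	return v
}

func main() {
	calls := 0
	m := &memo{
		compute: func(k string) string { calls++; return strings.ToUpper(k) },
		cache:   map[string]string{},
	}
	fmt.Println(m.get("dockerfile"), m.get("dockerfile"), calls) // DOCKERFILE DOCKERFILE 1
}
```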
-func detectContentType(c []byte) (string, map[string]string, error) { - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) - if err != nil { - return "", nil, err - } - return contentType, args, nil -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/remote.go b/vendor/github.com/docker/docker/builder/remotecontext/remote.go deleted file mode 100644 index 6c4073bd4..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/remote.go +++ /dev/null @@ -1,153 +0,0 @@ -package remotecontext - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "regexp" - - "github.com/docker/docker/builder" - "github.com/pkg/errors" -) - -// When downloading remote contexts, limit the amount (in bytes) -// to be read from the response body in order to detect its Content-Type -const maxPreambleLength = 100 - -const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` - -var mimeRe = regexp.MustCompile(acceptableRemoteMIME) - -// MakeRemoteContext downloads a context from remoteURL and returns it. -// -// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of -// maxPreambleLength bytes from the body to help detecting the MIME type. -// Look at acceptableRemoteMIME for more details. -// -// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected -// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). -// In either case, an (assumed) tar stream is passed to FromArchive whose result is returned. -func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) { - f, err := GetWithStatusError(remoteURL) - if err != nil { - return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) - } - defer f.Body.Close() - - var contextReader io.ReadCloser - if contentTypeHandlers != nil { - contentType := f.Header.Get("Content-Type") - clen := f.ContentLength - - contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) - if err != nil { - return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) - } - defer contextReader.Close() - - // This loop tries to find a content-type handler for the detected content-type. - // If it could not find one from the caller-supplied map, it tries the empty content-type `""` - // which is interpreted as a fallback handler (usually used for raw tar contexts). - for _, ct := range []string{contentType, ""} { - if fn, ok := contentTypeHandlers[ct]; ok { - defer contextReader.Close() - if contextReader, err = fn(contextReader); err != nil { - return nil, err - } - break - } - } - } - - // Pass through - this is a pre-packaged context, presumably - // with a Dockerfile with the right name inside it. - return FromArchive(contextReader) -} - -// GetWithStatusError does an http.Get() and returns an error if the -// status code is 4xx or 5xx. 
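`detectContentType` above is a thin composition of two stdlib calls: `http.DetectContentType` sniffs the leading bytes (never returning an empty string, falling back to `application/octet-stream`), and `mime.ParseMediaType` strips any parameters from the result. The same two steps in isolation:

```go
package main

import (
	"fmt"
	"mime"
	"net/http"
)

func main() {
	// Same two-step detection as detectContentType above: sniff the
	// first bytes, then separate the media type from its parameters.
	body := []byte("\x1f\x8b\x08\x00") // gzip magic bytes
	sniffed := http.DetectContentType(body)
	mediaType, params, err := mime.ParseMediaType(sniffed)
	fmt.Println(sniffed, mediaType, params, err)
}
```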
-func GetWithStatusError(address string) (resp *http.Response, err error) { - if resp, err = http.Get(address); err != nil { - if uerr, ok := err.(*url.Error); ok { - if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout { - return nil, dnsError{err} - } - } - return nil, systemError{err} - } - if resp.StatusCode < 400 { - return resp, nil - } - msg := fmt.Sprintf("failed to GET %s with status %s", address, resp.Status) - body, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, errors.Wrap(systemError{err}, msg+": error reading body") - } - - msg += ": " + string(bytes.TrimSpace(body)) - switch resp.StatusCode { - case http.StatusNotFound: - return nil, notFoundError(msg) - case http.StatusBadRequest: - return nil, requestError(msg) - case http.StatusUnauthorized: - return nil, unauthorizedError(msg) - case http.StatusForbidden: - return nil, forbiddenError(msg) - } - return nil, unknownError{errors.New(msg)} -} - -// inspectResponse looks into the http response data at r to determine whether its -// content-type is on the list of acceptable content types for remote build contexts. -// This function returns: -// - a string representation of the detected content-type -// - an io.Reader for the response body -// - an error value which will be non-nil either when something goes wrong while -// reading bytes from r or when the detected content-type is not acceptable. -func inspectResponse(ct string, r io.Reader, clen int64) (string, io.ReadCloser, error) { - plen := clen - if plen <= 0 || plen > maxPreambleLength { - plen = maxPreambleLength - } - - preamble := make([]byte, plen) - rlen, err := r.Read(preamble) - if rlen == 0 { - return ct, ioutil.NopCloser(r), errors.New("empty response") - } - if err != nil && err != io.EOF { - return ct, ioutil.NopCloser(r), err - } - - preambleR := bytes.NewReader(preamble[:rlen]) - bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) - // Some web servers will use application/octet-stream as the default - // content type for files without an extension (e.g. 
'Dockerfile') - // so if we receive this value we better check for text content - contentType := ct - if len(ct) == 0 || ct == mimeTypes.OctetStream { - contentType, _, err = detectContentType(preamble) - if err != nil { - return contentType, bodyReader, err - } - } - - contentType = selectAcceptableMIME(contentType) - var cterr error - if len(contentType) == 0 { - cterr = fmt.Errorf("unsupported Content-Type %q", ct) - contentType = ct - } - - return contentType, bodyReader, cterr -} - -func selectAcceptableMIME(ct string) string { - return mimeRe.FindString(ct) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go deleted file mode 100644 index 78f7470b3..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go +++ /dev/null @@ -1,157 +0,0 @@ -package remotecontext - -import ( - "os" - "sync" - - "github.com/docker/docker/pkg/containerfs" - iradix "github.com/hashicorp/go-immutable-radix" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" -) - -type hashed interface { - Digest() digest.Digest -} - -// CachableSource is a source that contains cache records for its contents -type CachableSource struct { - mu sync.Mutex - root containerfs.ContainerFS - tree *iradix.Tree - txn *iradix.Txn -} - -// NewCachableSource creates new CachableSource -func NewCachableSource(root string) *CachableSource { - ts := &CachableSource{ - tree: iradix.New(), - root: containerfs.NewLocalContainerFS(root), - } - return ts -} - -// MarshalBinary marshals current cache information to a byte array -func (cs *CachableSource) MarshalBinary() ([]byte, error) { - b := TarsumBackup{Hashes: make(map[string]string)} - root := cs.getRoot() - root.Walk(func(k []byte, v interface{}) bool { - b.Hashes[string(k)] = v.(*fileInfo).sum - return false - }) - return b.Marshal() -} - -// UnmarshalBinary decodes cache information for presented byte array -func (cs *CachableSource) UnmarshalBinary(data []byte) error { - var b TarsumBackup - if err := b.Unmarshal(data); err != nil { - return err - } - txn := iradix.New().Txn() - for p, v := range b.Hashes { - txn.Insert([]byte(p), &fileInfo{sum: v}) - } - cs.mu.Lock() - defer cs.mu.Unlock() - cs.tree = txn.Commit() - return nil -} - -// Scan rescans the cache information from the file system -func (cs *CachableSource) Scan() error { - lc, err := NewLazySource(cs.root) - if err != nil { - return err - } - txn := iradix.New().Txn() - err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error { - if err != nil { - return errors.Wrapf(err, "failed to walk %s", path) - } - rel, err := Rel(cs.root, path) - if err != nil { - return err - } - h, err := lc.Hash(rel) - if err != nil { - return err - } - txn.Insert([]byte(rel), &fileInfo{sum: h}) - return nil - }) - if err != nil { - return err - } - cs.mu.Lock() - defer cs.mu.Unlock() - cs.tree = txn.Commit() - return nil -} - -// HandleChange notifies the source about a modification operation -func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { - cs.mu.Lock() - if cs.txn == nil { - cs.txn = cs.tree.Txn() - } - if kind == fsutil.ChangeKindDelete { - cs.txn.Delete([]byte(p)) - cs.mu.Unlock() - return - } - - h, ok := fi.(hashed) - if !ok { - cs.mu.Unlock() - return errors.Errorf("invalid fileinfo: %s", p) - } - - hfi := &fileInfo{ - sum: h.Digest().Hex(), - } - cs.txn.Insert([]byte(p), 
hfi) - cs.mu.Unlock() - return nil -} - -func (cs *CachableSource) getRoot() *iradix.Node { - cs.mu.Lock() - if cs.txn != nil { - cs.tree = cs.txn.Commit() - cs.txn = nil - } - t := cs.tree - cs.mu.Unlock() - return t.Root() -} - -// Close closes the source -func (cs *CachableSource) Close() error { - return nil -} - -// Hash returns a hash for a single file in the source -func (cs *CachableSource) Hash(path string) (string, error) { - n := cs.getRoot() - // TODO: check this for symlinks - v, ok := n.Get([]byte(path)) - if !ok { - return path, nil - } - return v.(*fileInfo).sum, nil -} - -// Root returns a root directory for the source -func (cs *CachableSource) Root() containerfs.ContainerFS { - return cs.root -} - -type fileInfo struct { - sum string -} - -func (fi *fileInfo) Hash() string { - return fi.sum -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go deleted file mode 100644 index 1d23bbe65..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go +++ /dev/null @@ -1,525 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: tarsum.proto -// DO NOT EDIT! - -/* -Package remotecontext is a generated protocol buffer package. - -It is generated from these files: - tarsum.proto - -It has these top-level messages: - TarsumBackup -*/ -package remotecontext - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type TarsumBackup struct { - Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } -func (*TarsumBackup) ProtoMessage() {} -func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } - -func (m *TarsumBackup) GetHashes() map[string]string { - if m != nil { - return m.Hashes - } - return nil -} - -func init() { - proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") -} -func (this *TarsumBackup) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*TarsumBackup) - if !ok { - that2, ok := that.(TarsumBackup) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.Hashes) != len(that1.Hashes) { - return false - } - for i := range this.Hashes { - if this.Hashes[i] != that1.Hashes[i] { - return false - } - } - return true -} -func (this *TarsumBackup) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&remotecontext.TarsumBackup{") - keysForHashes := make([]string, 0, len(this.Hashes)) - for k := range this.Hashes { - keysForHashes = append(keysForHashes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) - mapStringForHashes := "map[string]string{" - for _, k := range keysForHashes { - mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) - } - mapStringForHashes += "}" - if this.Hashes != nil { - s = append(s, "Hashes: "+mapStringForHashes+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringTarsum(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hashes) > 0 { - for k := range m.Hashes { - dAtA[i] = 0xa - i++ - v := m.Hashes[k] - mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) - i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintTarsum(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintTarsum(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintTarsum(dAtA []byte, 
offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *TarsumBackup) Size() (n int) { - var l int - _ = l - if len(m.Hashes) > 0 { - for k, v := range m.Hashes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) - n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) - } - } - return n -} - -func sovTarsum(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozTarsum(x uint64) (n int) { - return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TarsumBackup) String() string { - if this == nil { - return "nil" - } - keysForHashes := make([]string, 0, len(this.Hashes)) - for k := range this.Hashes { - keysForHashes = append(keysForHashes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) - mapStringForHashes := "map[string]string{" - for _, k := range keysForHashes { - mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) - } - mapStringForHashes += "}" - s := strings.Join([]string{`&TarsumBackup{`, - `Hashes:` + mapStringForHashes + `,`, - `}`, - }, "") - return s -} -func valueToStringTarsum(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *TarsumBackup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTarsum - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTarsum - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Hashes == nil { - m.Hashes = make(map[string]string) - } 
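The generated `encodeVarintTarsum`/`sovTarsum` pair above implements protobuf's base-128 varint: seven payload bits per byte, low group first, high bit set while more bytes follow. The standard library ships the same unsigned encoding in `encoding/binary`:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Unsigned protobuf varints (as written by encodeVarintTarsum
	// above) use the same base-128 scheme as encoding/binary.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02 — low 7 bits first, high bit = "more"

	v, read := binary.Uvarint(buf[:n])
	fmt.Println(v, read) // 300 2
}
```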
- if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTarsum - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Hashes[mapkey] = mapvalue - } else { - var mapvalue string - m.Hashes[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTarsum(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTarsum - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTarsum(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthTarsum - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipTarsum(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } - -var fileDescriptorTarsum = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a, - 0x2e, 0xcd, 0xd5, 
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49, - 0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b, - 0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b, - 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51, - 0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24, - 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, - 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, - 0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, - 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, - 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, - 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f, - 0xe0, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go deleted file mode 100644 index c7bb39c43..000000000 --- a/vendor/github.com/docker/docker/cli/cobra.go +++ /dev/null @@ -1,150 +0,0 @@ -package cli - -import ( - "fmt" - "strings" - - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -// SetupRootCommand sets default usage, help, and error handling for the -// root command. -func SetupRootCommand(rootCmd *cobra.Command) { - cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) - cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) - cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) - cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) - cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) - - rootCmd.SetUsageTemplate(usageTemplate) - rootCmd.SetHelpTemplate(helpTemplate) - rootCmd.SetFlagErrorFunc(FlagErrorFunc) - rootCmd.SetHelpCommand(helpCommand) - - rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") - rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") -} - -// FlagErrorFunc prints an error message which matches the format of the -// docker/docker/cli error messages -func FlagErrorFunc(cmd *cobra.Command, err error) error { - if err == nil { - return nil - } - - usage := "" - if cmd.HasSubCommands() { - usage = "\n\n" + cmd.UsageString() - } - return StatusError{ - Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), - StatusCode: 125, - } -} - -var helpCommand = &cobra.Command{ - Use: "help [command]", - Short: "Help about the command", - PersistentPreRun: func(cmd *cobra.Command, args []string) {}, - PersistentPostRun: func(cmd *cobra.Command, args []string) {}, - RunE: func(c *cobra.Command, args []string) error { - cmd, args, e := c.Root().Find(args) - if cmd == nil || e != nil || len(args) > 0 { - return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) - } - - helpFunc := cmd.HelpFunc() - helpFunc(cmd, args) - return nil - }, -} - -func hasSubCommands(cmd *cobra.Command) bool { - return len(operationSubCommands(cmd)) > 0 -} - -func hasManagementSubCommands(cmd *cobra.Command) bool { - return len(managementSubCommands(cmd)) > 0 -} - -func operationSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} - for _, sub := range cmd.Commands() { - if 
sub.IsAvailableCommand() && !sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -func wrappedFlagUsages(cmd *cobra.Command) string { - width := 80 - if ws, err := term.GetWinsize(0); err == nil { - width = int(ws.Width) - } - return cmd.Flags().FlagUsagesWrapped(width - 1) -} - -func managementSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} - for _, sub := range cmd.Commands() { - if sub.IsAvailableCommand() && sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -var usageTemplate = `Usage: - -{{- if not .HasSubCommands}} {{.UseLine}}{{end}} -{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} - -{{ .Short | trim }} - -{{- if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}} - -{{- end}} -{{- if .HasExample}} - -Examples: -{{ .Example }} - -{{- end}} -{{- if .HasFlags}} - -Options: -{{ wrappedFlagUsages . | trimRightSpace}} - -{{- end}} -{{- if hasManagementSubCommands . }} - -Management Commands: - -{{- range managementSubCommands . }} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} - -{{- end}} -{{- if hasSubCommands .}} - -Commands: - -{{- range operationSubCommands . }} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} -{{- end}} - -{{- if .HasSubCommands }} - -Run '{{.CommandPath}} COMMAND --help' for more information on a command. -{{- end}} -` - -var helpTemplate = ` -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go deleted file mode 100644 index 62f62433b..000000000 --- a/vendor/github.com/docker/docker/cli/error.go +++ /dev/null @@ -1,33 +0,0 @@ -package cli - -import ( - "fmt" - "strings" -) - -// Errors is a list of errors. -// Useful in a loop if you don't want to return the error right away and you want to display after the loop, -// all the errors that happened during the loop. -type Errors []error - -func (errList Errors) Error() string { - if len(errList) < 1 { - return "" - } - - out := make([]string, len(errList)) - for i := range errList { - out[i] = errList[i].Error() - } - return strings.Join(out, ", ") -} - -// StatusError reports an unsuccessful exit by a command. 
-type StatusError struct { - Status string - StatusCode int -} - -func (e StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go deleted file mode 100644 index d56bc213a..000000000 --- a/vendor/github.com/docker/docker/cli/required.go +++ /dev/null @@ -1,27 +0,0 @@ -package cli - -import ( - "strings" - - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -// NoArgs validates args and returns an error if there are any args -func NoArgs(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return nil - } - - if cmd.HasSubCommands() { - return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) - } - - return errors.Errorf( - "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) -} diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE new file mode 100644 index 000000000..d511905c1 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. 
Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. 
The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE new file mode 100644 index 000000000..5b6e7c66c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. 
+These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 000000000..e67cdabd2 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/go-connections/doc.go b/vendor/github.com/docker/go-connections/doc.go deleted file mode 100644 index 43e27247d..000000000 --- a/vendor/github.com/docker/go-connections/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package connections provides libraries to work with network connections. -// This library is divided in several components for specific usage. -package connections diff --git a/vendor/github.com/docker/libcompose/package.go b/vendor/github.com/docker/libcompose/package.go deleted file mode 100644 index c49a9cc6f..000000000 --- a/vendor/github.com/docker/libcompose/package.go +++ /dev/null @@ -1 +0,0 @@ -package libcompose diff --git a/vendor/github.com/go-kit/kit/log/term/LICENSE b/vendor/github.com/go-kit/kit/log/term/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/term/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 89e07ae19..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,136 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. 
-func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { - return false - } - - return aname == proto.MessageName(pb) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595da7..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. 
-*/ -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 65cb0f8eb..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. 
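//
// (Editorial sketch, assuming the vendored durpb package from the imports above:)
//
//	d, err := ptypes.Duration(&durpb.Duration{Seconds: 90, Nanos: 500000000})
//	// on success, d == 90*time.Second + 500*time.Millisecond
//	back := ptypes.DurationProto(d) // back.Seconds == 90, back.Nanos == 500000000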
-func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 1b3657622..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,125 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. 
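//
// (Editorial sketch of the exported helpers defined later in this file:)
//
//	ts, err := ptypes.TimestampProto(time.Date(2018, 2, 12, 0, 0, 0, 0, time.UTC))
//	// on success, ptypes.Timestamp(ts) yields the same instant back, in UTC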
-// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/vendor/github.com/hashicorp/consul/commands.go b/vendor/github.com/hashicorp/consul/commands.go deleted file mode 100644 index 99406274a..000000000 --- a/vendor/github.com/hashicorp/consul/commands.go +++ /dev/null @@ -1,368 +0,0 @@ -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/hashicorp/consul/command" - "github.com/hashicorp/consul/command/agent" - "github.com/hashicorp/consul/command/base" - "github.com/hashicorp/consul/version" - "github.com/mitchellh/cli" -) - -// Commands is the mapping of all the available Consul commands. 
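//
// (Editorial sketch: with github.com/mitchellh/cli, a factory map like the one
// declared below is handed to a cli.CLI, and each factory returns a value
// implementing Help/Run/Synopsis; demoCommand is hypothetical.)
//
//	c := cli.NewCLI("consul", version.Version)
//	c.Args = os.Args[1:]
//	c.Commands = map[string]cli.CommandFactory{
//		"demo": func() (cli.Command, error) { return &demoCommand{}, nil },
//	}
//	exitStatus, err := c.Run()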
-var Commands map[string]cli.CommandFactory - -func init() { - ui := &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr} - - Commands = map[string]cli.CommandFactory{ - "agent": func() (cli.Command, error) { - return &agent.Command{ - Command: base.Command{ - Flags: base.FlagSetNone, - Ui: ui, - }, - Revision: version.GitCommit, - Version: version.Version, - VersionPrerelease: version.VersionPrerelease, - HumanVersion: version.GetHumanVersion(), - ShutdownCh: make(chan struct{}), - }, nil - }, - - "configtest": func() (cli.Command, error) { - return &command.ConfigTestCommand{ - Command: base.Command{ - Flags: base.FlagSetNone, - Ui: ui, - }, - }, nil - }, - - "event": func() (cli.Command, error) { - return &command.EventCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "exec": func() (cli.Command, error) { - return &command.ExecCommand{ - ShutdownCh: makeShutdownCh(), - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "force-leave": func() (cli.Command, error) { - return &command.ForceLeaveCommand{ - Command: base.Command{ - Flags: base.FlagSetClientHTTP, - Ui: ui, - }, - }, nil - }, - - "info": func() (cli.Command, error) { - return &command.InfoCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetClientHTTP, - }, - }, nil - }, - - "join": func() (cli.Command, error) { - return &command.JoinCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetClientHTTP, - }, - }, nil - }, - - "keygen": func() (cli.Command, error) { - return &command.KeygenCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetNone, - }, - }, nil - }, - - "keyring": func() (cli.Command, error) { - return &command.KeyringCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetClientHTTP, - }, - }, nil - }, - - "kv": func() (cli.Command, error) { - return &command.KVCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetNone, - }, - }, nil - }, - - "kv delete": func() (cli.Command, error) { - return &command.KVDeleteCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetHTTP, - }, - }, nil - }, - - "kv get": func() (cli.Command, error) { - return &command.KVGetCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetHTTP, - }, - }, nil - }, - - "kv put": func() (cli.Command, error) { - return &command.KVPutCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetHTTP, - }, - }, nil - }, - - "kv export": func() (cli.Command, error) { - return &command.KVExportCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetHTTP, - }, - }, nil - }, - - "kv import": func() (cli.Command, error) { - return &command.KVImportCommand{ - Command: base.Command{ - Ui: ui, - Flags: base.FlagSetHTTP, - }, - }, nil - }, - - "leave": func() (cli.Command, error) { - return &command.LeaveCommand{ - Command: base.Command{ - Flags: base.FlagSetClientHTTP, - Ui: ui, - }, - }, nil - }, - - "lock": func() (cli.Command, error) { - return &command.LockCommand{ - ShutdownCh: makeShutdownCh(), - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "maint": func() (cli.Command, error) { - return &command.MaintCommand{ - Command: base.Command{ - Flags: base.FlagSetClientHTTP, - Ui: ui, - }, - }, nil - }, - - "members": func() (cli.Command, error) { - return &command.MembersCommand{ - Command: base.Command{ - Flags: base.FlagSetClientHTTP, - Ui: ui, - }, - }, nil - }, - - "monitor": func() (cli.Command, error) { - return &command.MonitorCommand{ - ShutdownCh: 
makeShutdownCh(), - Command: base.Command{ - Flags: base.FlagSetClientHTTP, - Ui: ui, - }, - }, nil - }, - - "operator": func() (cli.Command, error) { - return &command.OperatorCommand{ - Command: base.Command{ - Flags: base.FlagSetNone, - Ui: ui, - }, - }, nil - }, - - "operator autopilot": func() (cli.Command, error) { - return &command.OperatorAutopilotCommand{ - Command: base.Command{ - Flags: base.FlagSetNone, - Ui: ui, - }, - }, nil - }, - - "operator autopilot get-config": func() (cli.Command, error) { - return &command.OperatorAutopilotGetCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "operator autopilot set-config": func() (cli.Command, error) { - return &command.OperatorAutopilotSetCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "operator raft": func() (cli.Command, error) { - return &command.OperatorRaftCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "operator raft list-peers": func() (cli.Command, error) { - return &command.OperatorRaftListCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "operator raft remove-peer": func() (cli.Command, error) { - return &command.OperatorRaftRemoveCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "reload": func() (cli.Command, error) { - return &command.ReloadCommand{ - Command: base.Command{ - Flags: base.FlagSetClientHTTP, - Ui: ui, - }, - }, nil - }, - - "rtt": func() (cli.Command, error) { - return &command.RTTCommand{ - Command: base.Command{ - Flags: base.FlagSetClientHTTP, - Ui: ui, - }, - }, nil - }, - - "snapshot": func() (cli.Command, error) { - return &command.SnapshotCommand{ - Ui: ui, - }, nil - }, - - "snapshot restore": func() (cli.Command, error) { - return &command.SnapshotRestoreCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "snapshot save": func() (cli.Command, error) { - return &command.SnapshotSaveCommand{ - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - - "snapshot inspect": func() (cli.Command, error) { - return &command.SnapshotInspectCommand{ - Command: base.Command{ - Flags: base.FlagSetNone, - Ui: ui, - }, - }, nil - }, - - "validate": func() (cli.Command, error) { - return &command.ValidateCommand{ - Command: base.Command{ - Flags: base.FlagSetNone, - Ui: ui, - }, - }, nil - }, - - "version": func() (cli.Command, error) { - return &command.VersionCommand{ - HumanVersion: version.GetHumanVersion(), - Ui: ui, - }, nil - }, - - "watch": func() (cli.Command, error) { - return &command.WatchCommand{ - ShutdownCh: makeShutdownCh(), - Command: base.Command{ - Flags: base.FlagSetHTTP, - Ui: ui, - }, - }, nil - }, - } -} - -// makeShutdownCh returns a channel that can be used for shutdown -// notifications for commands. This channel will send a message for every -// interrupt or SIGTERM received. 
-func makeShutdownCh() <-chan struct{} { - resultCh := make(chan struct{}) - - signalCh := make(chan os.Signal, 4) - signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) - go func() { - for { - <-signalCh - resultCh <- struct{}{} - } - }() - - return resultCh -} diff --git a/vendor/github.com/hashicorp/consul/main.go b/vendor/github.com/hashicorp/consul/main.go deleted file mode 100644 index 1867a73f4..000000000 --- a/vendor/github.com/hashicorp/consul/main.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "fmt" - "github.com/mitchellh/cli" - "io/ioutil" - "log" - "os" - - "github.com/hashicorp/consul/lib" -) - -func init() { - lib.SeedMathRand() -} - -func main() { - os.Exit(realMain()) -} - -func realMain() int { - log.SetOutput(ioutil.Discard) - - // Get the command line args. We shortcut "--version" and "-v" to - // just show the version. - args := os.Args[1:] - for _, arg := range args { - if arg == "--" { - break - } - if arg == "-v" || arg == "--version" { - newArgs := make([]string, len(args)+1) - newArgs[0] = "version" - copy(newArgs[1:], args) - args = newArgs - break - } - } - - // Filter out the configtest command from the help display - var included []string - for command := range Commands { - if command != "configtest" { - included = append(included, command) - } - } - - cli := &cli.CLI{ - Args: args, - Commands: Commands, - HelpFunc: cli.FilteredHelpFunc(included, cli.BasicHelpFunc("consul")), - } - - exitCode, err := cli.Run() - if err != nil { - fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err.Error()) - return 1 - } - - return exitCode -} diff --git a/vendor/github.com/hashicorp/consul/website/LICENSE.md b/vendor/github.com/hashicorp/consul/website/LICENSE.md new file mode 100644 index 000000000..3189f43a6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/website/LICENSE.md @@ -0,0 +1,10 @@ +# Proprietary License + +This license is temporary while a more official one is drafted. However, +this should make it clear: + +The text contents of this website are MPL 2.0 licensed. + +The design contents of this website are proprietary and may not be reproduced +or reused in any way other than to run the website locally. The license for +the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright new file mode 100644 index 000000000..21a1a1b53 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright @@ -0,0 +1,2 @@ +Name: serf +Copyright: Hashicorp 2013 diff --git a/vendor/github.com/hashicorp/serf/website/LICENSE.md b/vendor/github.com/hashicorp/serf/website/LICENSE.md new file mode 100644 index 000000000..3189f43a6 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/website/LICENSE.md @@ -0,0 +1,10 @@ +# Proprietary License + +This license is temporary while a more official one is drafted. However, +this should make it clear: + +The text contents of this website are MPL 2.0 licensed. + +The design contents of this website are proprietary and may not be reproduced +or reused in any way other than to run the website locally. The license for +the design is owned solely by HashiCorp, Inc. 
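(Editorial note: the makeShutdownCh helper deleted above is a small reusable
pattern: it translates OS signals into a plain channel that commands can
select on. A self-contained sketch of the same idea:)

	package main

	import (
		"fmt"
		"os"
		"os/signal"
		"syscall"
	)

	// makeShutdownCh forwards every interrupt or SIGTERM as an empty struct.
	func makeShutdownCh() <-chan struct{} {
		resultCh := make(chan struct{})
		signalCh := make(chan os.Signal, 4)
		signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
		go func() {
			for range signalCh {
				resultCh <- struct{}{}
			}
		}()
		return resultCh
	}

	func main() {
		<-makeShutdownCh() // block until Ctrl-C or SIGTERM
		fmt.Println("shutting down")
	}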
diff --git a/vendor/github.com/hashicorp/serf/website/source/LICENSE b/vendor/github.com/hashicorp/serf/website/source/LICENSE new file mode 100644 index 000000000..36c29d7f7 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/website/source/LICENSE @@ -0,0 +1,10 @@ +# Proprietary License + +This license is temporary while a more official one is drafted. However, +this should make it clear: + +* The text contents of this website are MPL 2.0 licensed. + +* The design contents of this website are proprietary and may not be reproduced + or reused in any way other than to run the Serf website locally. The license + for the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml new file mode 100644 index 000000000..2f1ad0082 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/license.yml @@ -0,0 +1,4 @@ +import: ../../../../fossene/db/schema/thing.yml +fields: + site: string + author: root diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go deleted file mode 100644 index 773eb279d..000000000 --- a/vendor/github.com/influxdata/influxdb/client/influxdb.go +++ /dev/null @@ -1,832 +0,0 @@ -// Package client implements a now-deprecated client for InfluxDB; -// use github.com/influxdata/influxdb/client/v2 instead. -package client // import "github.com/influxdata/influxdb/client" - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/influxdata/influxdb/models" -) - -const ( - // DefaultHost is the default host used to connect to an InfluxDB instance - DefaultHost = "localhost" - - // DefaultPort is the default port used to connect to an InfluxDB instance - DefaultPort = 8086 - - // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance - DefaultTimeout = 0 -) - -// Query is used to send a command to the server. Both Command and Database are required. -type Query struct { - Command string - Database string - - // Chunked tells the server to send back chunked responses. This places - // less load on the server by sending back chunks of the response rather - // than waiting for the entire response all at once. - Chunked bool - - // ChunkSize sets the maximum number of rows that will be returned per - // chunk. Chunks are either divided based on their series or if they hit - // the chunk size limit. - // - // Chunked must be set to true for this option to be used. - ChunkSize int -} - -// ParseConnectionString will parse a string to create a valid connection URL -func ParseConnectionString(path string, ssl bool) (url.URL, error) { - var host string - var port int - - h, p, err := net.SplitHostPort(path) - if err != nil { - if path == "" { - host = DefaultHost - } else { - host = path - } - // If they didn't specify a port, always use the default port - port = DefaultPort - } else { - host = h - port, err = strconv.Atoi(p) - if err != nil { - return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) - } - } - - u := url.URL{ - Scheme: "http", - } - if ssl { - u.Scheme = "https" - } - - u.Host = net.JoinHostPort(host, strconv.Itoa(port)) - - return u, nil -} - -// Config is used to specify what server to connect to. -// URL: The URL of the server connecting to. -// Username/Password are optional. 
They will be passed via basic auth if provided. -// UserAgent: If not provided, will default "InfluxDBClient", -// Timeout: If not provided, will default to 0 (no timeout) -type Config struct { - URL url.URL - UnixSocket string - Username string - Password string - UserAgent string - Timeout time.Duration - Precision string - WriteConsistency string - UnsafeSsl bool -} - -// NewConfig will create a config to be used in connecting to the client -func NewConfig() Config { - return Config{ - Timeout: DefaultTimeout, - } -} - -// Client is used to make calls to the server. -type Client struct { - url url.URL - unixSocket string - username string - password string - httpClient *http.Client - userAgent string - precision string -} - -const ( - // ConsistencyOne requires at least one data node acknowledged a write. - ConsistencyOne = "one" - - // ConsistencyAll requires all data nodes to acknowledge a write. - ConsistencyAll = "all" - - // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. - ConsistencyQuorum = "quorum" - - // ConsistencyAny allows for hinted hand off, potentially no write happened yet. - ConsistencyAny = "any" -) - -// NewClient will instantiate and return a connected client to issue commands to the server. -func NewClient(c Config) (*Client, error) { - tlsConfig := &tls.Config{ - InsecureSkipVerify: c.UnsafeSsl, - } - - tr := &http.Transport{ - TLSClientConfig: tlsConfig, - } - - if c.UnixSocket != "" { - // No need for compression in local communications. - tr.DisableCompression = true - - tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", c.UnixSocket) - } - } - - client := Client{ - url: c.URL, - unixSocket: c.UnixSocket, - username: c.Username, - password: c.Password, - httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, - userAgent: c.UserAgent, - precision: c.Precision, - } - if client.userAgent == "" { - client.userAgent = "InfluxDBClient" - } - return &client, nil -} - -// SetAuth will update the username and passwords -func (c *Client) SetAuth(u, p string) { - c.username = u - c.password = p -} - -// SetPrecision will update the precision -func (c *Client) SetPrecision(precision string) { - c.precision = precision -} - -// Query sends a command to the server and returns the Response -func (c *Client) Query(q Query) (*Response, error) { - u := c.url - - u.Path = "query" - values := u.Query() - values.Set("q", q.Command) - values.Set("db", q.Database) - if q.Chunked { - values.Set("chunked", "true") - if q.ChunkSize > 0 { - values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) - } - } - if c.precision != "" { - values.Set("epoch", c.precision) - } - u.RawQuery = values.Encode() - - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - if q.Chunked { - cr := NewChunkedResponse(resp.Body) - for { - r, err := cr.NextResponse() - if err != nil { - // If we got an error while decoding the response, send that back. - return nil, err - } - - if r == nil { - break - } - - response.Results = append(response.Results, r.Results...) 
- if r.Err != nil { - response.Err = r.Err - break - } - } - } else { - dec := json.NewDecoder(resp.Body) - dec.UseNumber() - if err := dec.Decode(&response); err != nil { - // Ignore EOF errors if we got an invalid status code. - if !(err == io.EOF && resp.StatusCode != http.StatusOK) { - return nil, err - } - } - } - - // If we don't have an error in our json response, and didn't get StatusOK, - // then send back an error. - if resp.StatusCode != http.StatusOK && response.Error() == nil { - return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) - } - return &response, nil -} - -// Write takes BatchPoints and allows for writing of multiple points with defaults -// If successful, error is nil and Response is nil -// If an error occurs, Response may contain additional information if populated. -func (c *Client) Write(bp BatchPoints) (*Response, error) { - u := c.url - u.Path = "write" - - var b bytes.Buffer - for _, p := range bp.Points { - err := checkPointTypes(p) - if err != nil { - return nil, err - } - if p.Raw != "" { - if _, err := b.WriteString(p.Raw); err != nil { - return nil, err - } - } else { - for k, v := range bp.Tags { - if p.Tags == nil { - p.Tags = make(map[string]string, len(bp.Tags)) - } - p.Tags[k] = v - } - - if _, err := b.WriteString(p.MarshalString()); err != nil { - return nil, err - } - } - - if err := b.WriteByte('\n'); err != nil { - return nil, err - } - } - - req, err := http.NewRequest("POST", u.String(), &b) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - precision := bp.Precision - if precision == "" { - precision = c.precision - } - - params := req.URL.Query() - params.Set("db", bp.Database) - params.Set("rp", bp.RetentionPolicy) - params.Set("precision", precision) - params.Set("consistency", bp.WriteConsistency) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - var err = fmt.Errorf(string(body)) - response.Err = err - return &response, err - } - - return nil, nil -} - -// WriteLineProtocol takes a string with line returns to delimit each write -// If successful, error is nil and Response is nil -// If an error occurs, Response may contain additional information if populated. 
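//
// (Editorial sketch; the URL, database, and measurement are hypothetical:)
//
//	u, _ := url.Parse("http://localhost:8086")
//	c, _ := client.NewClient(client.Config{URL: *u})
//	_, err := c.WriteLineProtocol("cpu,host=server01 value=0.64", "mydb", "", "s", "")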
-func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { - u := c.url - u.Path = "write" - - r := strings.NewReader(data) - - req, err := http.NewRequest("POST", u.String(), r) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - params := req.URL.Query() - params.Set("db", database) - params.Set("rp", retentionPolicy) - params.Set("precision", precision) - params.Set("consistency", writeConsistency) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - err := fmt.Errorf(string(body)) - response.Err = err - return &response, err - } - - return nil, nil -} - -// Ping will check to see if the server is up -// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. -func (c *Client) Ping() (time.Duration, string, error) { - now := time.Now() - u := c.url - u.Path = "ping" - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return 0, "", err - } - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return 0, "", err - } - defer resp.Body.Close() - - version := resp.Header.Get("X-Influxdb-Version") - return time.Since(now), version, nil -} - -// Structs - -// Message represents a user message. -type Message struct { - Level string `json:"level,omitempty"` - Text string `json:"text,omitempty"` -} - -// Result represents a resultset returned from a single statement. -type Result struct { - Series []models.Row - Messages []*Message - Err error -} - -// MarshalJSON encodes the result into JSON. -func (r *Result) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. - var o struct { - Series []models.Row `json:"series,omitempty"` - Messages []*Message `json:"messages,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. - o.Series = r.Series - o.Messages = r.Messages - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Result struct -func (r *Result) UnmarshalJSON(b []byte) error { - var o struct { - Series []models.Row `json:"series,omitempty"` - Messages []*Message `json:"messages,omitempty"` - Err string `json:"error,omitempty"` - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - err := dec.Decode(&o) - if err != nil { - return err - } - r.Series = o.Series - r.Messages = o.Messages - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} - -// Response represents a list of statement results. -type Response struct { - Results []Result - Err error -} - -// MarshalJSON encodes the response into JSON. -func (r *Response) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. - var o struct { - Results []Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. 
- o.Results = r.Results - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Response struct -func (r *Response) UnmarshalJSON(b []byte) error { - var o struct { - Results []Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - err := dec.Decode(&o) - if err != nil { - return err - } - r.Results = o.Results - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} - -// Error returns the first error from any statement. -// Returns nil if no errors occurred on any statements. -func (r *Response) Error() error { - if r.Err != nil { - return r.Err - } - for _, result := range r.Results { - if result.Err != nil { - return result.Err - } - } - return nil -} - -// duplexReader reads responses and writes it to another writer while -// satisfying the reader interface. -type duplexReader struct { - r io.Reader - w io.Writer -} - -func (r *duplexReader) Read(p []byte) (n int, err error) { - n, err = r.r.Read(p) - if err == nil { - r.w.Write(p[:n]) - } - return n, err -} - -// ChunkedResponse represents a response from the server that -// uses chunking to stream the output. -type ChunkedResponse struct { - dec *json.Decoder - duplex *duplexReader - buf bytes.Buffer -} - -// NewChunkedResponse reads a stream and produces responses from the stream. -func NewChunkedResponse(r io.Reader) *ChunkedResponse { - resp := &ChunkedResponse{} - resp.duplex = &duplexReader{r: r, w: &resp.buf} - resp.dec = json.NewDecoder(resp.duplex) - resp.dec.UseNumber() - return resp -} - -// NextResponse reads the next line of the stream and returns a response. -func (r *ChunkedResponse) NextResponse() (*Response, error) { - var response Response - if err := r.dec.Decode(&response); err != nil { - if err == io.EOF { - return nil, nil - } - // A decoding error happened. This probably means the server crashed - // and sent a last-ditch error message to us. Ensure we have read the - // entirety of the connection to get any remaining error text. - io.Copy(ioutil.Discard, r.duplex) - return nil, errors.New(strings.TrimSpace(r.buf.String())) - } - r.buf.Reset() - return &response, nil -} - -// Point defines the fields that will be written to the database -// Measurement, Time, and Fields are required -// Precision can be specified if the time is in epoch format (integer). -// Valid values for Precision are n, u, ms, s, m, and h -type Point struct { - Measurement string - Tags map[string]string - Time time.Time - Fields map[string]interface{} - Precision string - Raw string -} - -// MarshalJSON will format the time in RFC3339Nano -// Precision is also ignored as it is only used for writing, not reading -// Or another way to say it is we always send back in nanosecond precision -func (p *Point) MarshalJSON() ([]byte, error) { - point := struct { - Measurement string `json:"measurement,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Time string `json:"time,omitempty"` - Fields map[string]interface{} `json:"fields,omitempty"` - Precision string `json:"precision,omitempty"` - }{ - Measurement: p.Measurement, - Tags: p.Tags, - Fields: p.Fields, - Precision: p.Precision, - } - // Let it omit empty if it's really zero - if !p.Time.IsZero() { - point.Time = p.Time.UTC().Format(time.RFC3339Nano) - } - return json.Marshal(&point) -} - -// MarshalString renders string representation of a Point with specified -// precision. The default precision is nanoseconds. 
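//
// (Editorial sketch; the measurement, tag, and field are hypothetical:)
//
//	p := client.Point{
//		Measurement: "cpu",
//		Tags:        map[string]string{"host": "server01"},
//		Fields:      map[string]interface{}{"value": 0.64},
//		Time:        time.Unix(0, 1518393600000000000),
//	}
//	fmt.Println(p.MarshalString())
//	// roughly: cpu,host=server01 value=0.64 1518393600000000000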
-func (p *Point) MarshalString() string { - pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) - if err != nil { - return "# ERROR: " + err.Error() + " " + p.Measurement - } - if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { - return pt.String() - } - return pt.PrecisionString(p.Precision) -} - -// UnmarshalJSON decodes the data into the Point struct -func (p *Point) UnmarshalJSON(b []byte) error { - var normal struct { - Measurement string `json:"measurement"` - Tags map[string]string `json:"tags"` - Time time.Time `json:"time"` - Precision string `json:"precision"` - Fields map[string]interface{} `json:"fields"` - } - var epoch struct { - Measurement string `json:"measurement"` - Tags map[string]string `json:"tags"` - Time *int64 `json:"time"` - Precision string `json:"precision"` - Fields map[string]interface{} `json:"fields"` - } - - if err := func() error { - var err error - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - if err = dec.Decode(&epoch); err != nil { - return err - } - // Convert from epoch to time.Time, but only if Time - // was actually set. - var ts time.Time - if epoch.Time != nil { - ts, err = EpochToTime(*epoch.Time, epoch.Precision) - if err != nil { - return err - } - } - p.Measurement = epoch.Measurement - p.Tags = epoch.Tags - p.Time = ts - p.Precision = epoch.Precision - p.Fields = normalizeFields(epoch.Fields) - return nil - }(); err == nil { - return nil - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - if err := dec.Decode(&normal); err != nil { - return err - } - normal.Time = SetPrecision(normal.Time, normal.Precision) - p.Measurement = normal.Measurement - p.Tags = normal.Tags - p.Time = normal.Time - p.Precision = normal.Precision - p.Fields = normalizeFields(normal.Fields) - - return nil -} - -// Remove any notion of json.Number -func normalizeFields(fields map[string]interface{}) map[string]interface{} { - newFields := map[string]interface{}{} - - for k, v := range fields { - switch v := v.(type) { - case json.Number: - jv, e := v.Float64() - if e != nil { - panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) - } - newFields[k] = jv - default: - newFields[k] = v - } - } - return newFields -} - -// BatchPoints is used to send batched data in a single write. -// Database and Points are required -// If no retention policy is specified, it will use the databases default retention policy. -// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. -// If time is specified, it will be applied to any point with an empty time. -// Precision can be specified if the time is in epoch format (integer). 
-// Valid values for Precision are n, u, ms, s, m, and h -type BatchPoints struct { - Points []Point `json:"points,omitempty"` - Database string `json:"database,omitempty"` - RetentionPolicy string `json:"retentionPolicy,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Time time.Time `json:"time,omitempty"` - Precision string `json:"precision,omitempty"` - WriteConsistency string `json:"-"` -} - -// UnmarshalJSON decodes the data into the BatchPoints struct -func (bp *BatchPoints) UnmarshalJSON(b []byte) error { - var normal struct { - Points []Point `json:"points"` - Database string `json:"database"` - RetentionPolicy string `json:"retentionPolicy"` - Tags map[string]string `json:"tags"` - Time time.Time `json:"time"` - Precision string `json:"precision"` - } - var epoch struct { - Points []Point `json:"points"` - Database string `json:"database"` - RetentionPolicy string `json:"retentionPolicy"` - Tags map[string]string `json:"tags"` - Time *int64 `json:"time"` - Precision string `json:"precision"` - } - - if err := func() error { - var err error - if err = json.Unmarshal(b, &epoch); err != nil { - return err - } - // Convert from epoch to time.Time - var ts time.Time - if epoch.Time != nil { - ts, err = EpochToTime(*epoch.Time, epoch.Precision) - if err != nil { - return err - } - } - bp.Points = epoch.Points - bp.Database = epoch.Database - bp.RetentionPolicy = epoch.RetentionPolicy - bp.Tags = epoch.Tags - bp.Time = ts - bp.Precision = epoch.Precision - return nil - }(); err == nil { - return nil - } - - if err := json.Unmarshal(b, &normal); err != nil { - return err - } - normal.Time = SetPrecision(normal.Time, normal.Precision) - bp.Points = normal.Points - bp.Database = normal.Database - bp.RetentionPolicy = normal.RetentionPolicy - bp.Tags = normal.Tags - bp.Time = normal.Time - bp.Precision = normal.Precision - - return nil -} - -// utility functions - -// Addr provides the current url as a string of the server the client is connected to. -func (c *Client) Addr() string { - if c.unixSocket != "" { - return c.unixSocket - } - return c.url.String() -} - -// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. 
-func checkPointTypes(p Point) error { - for _, v := range p.Fields { - switch v.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil: - return nil - default: - return fmt.Errorf("unsupported point type: %T", v) - } - } - return nil -} - -// helper functions - -// EpochToTime takes a unix epoch time and uses precision to return back a time.Time -func EpochToTime(epoch int64, precision string) (time.Time, error) { - if precision == "" { - precision = "s" - } - var t time.Time - switch precision { - case "h": - t = time.Unix(0, epoch*int64(time.Hour)) - case "m": - t = time.Unix(0, epoch*int64(time.Minute)) - case "s": - t = time.Unix(0, epoch*int64(time.Second)) - case "ms": - t = time.Unix(0, epoch*int64(time.Millisecond)) - case "u": - t = time.Unix(0, epoch*int64(time.Microsecond)) - case "n": - t = time.Unix(0, epoch) - default: - return time.Time{}, fmt.Errorf("Unknown precision %q", precision) - } - return t, nil -} - -// SetPrecision will round a time to the specified precision -func SetPrecision(t time.Time, precision string) time.Time { - switch precision { - case "n": - case "u": - return t.Round(time.Microsecond) - case "ms": - return t.Round(time.Millisecond) - case "s": - return t.Round(time.Second) - case "m": - return t.Round(time.Minute) - case "h": - return t.Round(time.Hour) - } - return t -} diff --git a/vendor/github.com/influxdata/influxdb/errors.go b/vendor/github.com/influxdata/influxdb/errors.go deleted file mode 100644 index 9bc6b9988..000000000 --- a/vendor/github.com/influxdata/influxdb/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -package influxdb - -import ( - "errors" - "fmt" - "strings" -) - -// ErrFieldTypeConflict is returned when a new field already exists with a -// different type. -var ErrFieldTypeConflict = errors.New("field type conflict") - -// ErrDatabaseNotFound indicates that a database operation failed on the -// specified database because the specified database does not exist. -func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } - -// ErrRetentionPolicyNotFound indicates that the named retention policy could -// not be found in the database. -func ErrRetentionPolicyNotFound(name string) error { - return fmt.Errorf("retention policy not found: %s", name) -} - -// IsAuthorizationError indicates whether an error is due to an authorization failure -func IsAuthorizationError(err error) bool { - e, ok := err.(interface { - AuthorizationFailed() bool - }) - return ok && e.AuthorizationFailed() -} - -// IsClientError indicates whether an error is a known client error. -func IsClientError(err error) bool { - if err == nil { - return false - } - - if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { - return true - } - - return false -} diff --git a/vendor/github.com/influxdata/influxdb/influxdb.go b/vendor/github.com/influxdata/influxdb/influxdb.go deleted file mode 100644 index a59417507..000000000 --- a/vendor/github.com/influxdata/influxdb/influxdb.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package influxdb is the root package of InfluxDB, -// the scalable datastore for metrics, events, and real-time analytics. -// -// If you're looking for the Go HTTP client for InfluxDB, -// see package github.com/influxdata/influxdb/client/v2. 
-package influxdb // import "github.com/influxdata/influxdb" diff --git a/vendor/github.com/influxdata/influxdb/node.go b/vendor/github.com/influxdata/influxdb/node.go deleted file mode 100644 index 68709edc3..000000000 --- a/vendor/github.com/influxdata/influxdb/node.go +++ /dev/null @@ -1,121 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" -) - -const ( - nodeFile = "node.json" - oldNodeFile = "id" - peersFilename = "peers.json" -) - -type Node struct { - path string - ID uint64 -} - -// LoadNode will load the node information from disk if present -func LoadNode(path string) (*Node, error) { - // Always check to see if we are upgrading first - if err := upgradeNodeFile(path); err != nil { - return nil, err - } - - n := &Node{ - path: path, - } - - f, err := os.Open(filepath.Join(path, nodeFile)) - if err != nil { - return nil, err - } - defer f.Close() - - if err := json.NewDecoder(f).Decode(n); err != nil { - return nil, err - } - - return n, nil -} - -// NewNode will return a new node -func NewNode(path string) *Node { - return &Node{ - path: path, - } -} - -// Save will save the node file to disk and replace the existing one if present -func (n *Node) Save() error { - file := filepath.Join(n.path, nodeFile) - tmpFile := file + "tmp" - - f, err := os.Create(tmpFile) - if err != nil { - return err - } - - if err = json.NewEncoder(f).Encode(n); err != nil { - f.Close() - return err - } - - if err = f.Close(); nil != err { - return err - } - - return os.Rename(tmpFile, file) -} - -func upgradeNodeFile(path string) error { - oldFile := filepath.Join(path, oldNodeFile) - b, err := ioutil.ReadFile(oldFile) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - // We shouldn't have an empty ID file, but if we do, ignore it - if len(b) == 0 { - return nil - } - - peers := []string{} - pb, err := ioutil.ReadFile(filepath.Join(path, peersFilename)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - err = json.Unmarshal(pb, &peers) - if err != nil { - return err - } - - if len(peers) > 1 { - return fmt.Errorf("to upgrade a cluster, please contact support at influxdata") - } - - n := &Node{ - path: path, - } - if n.ID, err = strconv.ParseUint(string(b), 10, 64); err != nil { - return err - } - if err := n.Save(); err != nil { - return err - } - if err := os.Remove(oldFile); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/mailru/easyjson/helpers.go b/vendor/github.com/mailru/easyjson/helpers.go deleted file mode 100644 index b86b87d22..000000000 --- a/vendor/github.com/mailru/easyjson/helpers.go +++ /dev/null @@ -1,78 +0,0 @@ -// Package easyjson contains marshaler/unmarshaler interfaces and helper functions. -package easyjson - -import ( - "io" - "io/ioutil" - "net/http" - "strconv" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// Marshaler is an easyjson-compatible marshaler interface. -type Marshaler interface { - MarshalEasyJSON(w *jwriter.Writer) -} - -// Marshaler is an easyjson-compatible unmarshaler interface. -type Unmarshaler interface { - UnmarshalEasyJSON(w *jlexer.Lexer) -} - -// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic. -type Optional interface { - IsDefined() bool -} - -// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied -// from a chain of smaller chunks. 
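//
// (Editorial sketch: v is any value whose type has generated
// MarshalEasyJSON/UnmarshalEasyJSON methods and is hypothetical.)
//
//	data, err := easyjson.Marshal(&v)  // v's type implements Marshaler
//	err = easyjson.Unmarshal(data, &v) // and Unmarshaler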
-func Marshal(v Marshaler) ([]byte, error) { - w := jwriter.Writer{} - v.MarshalEasyJSON(&w) - return w.BuildBytes() -} - -// MarshalToWriter marshals the data to an io.Writer. -func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) { - jw := jwriter.Writer{} - v.MarshalEasyJSON(&jw) - return jw.DumpTo(w) -} - -// MarshalToHTTPResponseWriter sets Content-Length and Content-Type headers for the -// http.ResponseWriter, and send the data to the writer. started will be equal to -// false if an error occurred before any http.ResponseWriter methods were actually -// invoked (in this case a 500 reply is possible). -func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) { - jw := jwriter.Writer{} - v.MarshalEasyJSON(&jw) - if jw.Error != nil { - return false, 0, jw.Error - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(jw.Size())) - - started = true - written, err = jw.DumpTo(w) - return -} - -// Unmarshal decodes the JSON in data into the object. -func Unmarshal(data []byte, v Unmarshaler) error { - l := jlexer.Lexer{Data: data} - v.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object. -func UnmarshalFromReader(r io.Reader, v Unmarshaler) error { - data, err := ioutil.ReadAll(r) - if err != nil { - return err - } - l := jlexer.Lexer{Data: data} - v.UnmarshalEasyJSON(&l) - return l.Error() -} diff --git a/vendor/github.com/mailru/easyjson/raw.go b/vendor/github.com/mailru/easyjson/raw.go deleted file mode 100644 index 737c73a88..000000000 --- a/vendor/github.com/mailru/easyjson/raw.go +++ /dev/null @@ -1,45 +0,0 @@ -package easyjson - -import ( - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// RawMessage is a raw piece of JSON (number, string, bool, object, array or null) that is extracted -// without parsing and output as is during marshalling. -type RawMessage []byte - -// MarshalEasyJSON does JSON marshaling using easyjson interface. -func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) { - if len(*v) == 0 { - w.RawString("null") - } else { - w.Raw(*v, nil) - } -} - -// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. -func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { - *v = RawMessage(l.Raw()) -} - -// UnmarshalJSON implements encoding/json.Unmarshaler interface. -func (v *RawMessage) UnmarshalJSON(data []byte) error { - *v = data - return nil -} - -var nullBytes = []byte("null") - -// MarshalJSON implements encoding/json.Marshaler interface. -func (v RawMessage) MarshalJSON() ([]byte, error) { - if len(v) == 0 { - return nullBytes, nil - } - return v, nil -} - -// IsDefined is required for integration with omitempty easyjson logic. -func (v *RawMessage) IsDefined() bool { - return len(*v) > 0 -} diff --git a/vendor/github.com/mesos/mesos-go/mesos/doc.go b/vendor/github.com/mesos/mesos-go/mesos/doc.go deleted file mode 100644 index 6c0973f34..000000000 --- a/vendor/github.com/mesos/mesos-go/mesos/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// This package was previously the home of the native bindings. Please use the -// native branch if you need to build against the native bindings. 
-package mesos diff --git a/vendor/github.com/mesosphere/mesos-dns/main.go b/vendor/github.com/mesosphere/mesos-dns/main.go deleted file mode 100644 index 39eaff15d..000000000 --- a/vendor/github.com/mesosphere/mesos-dns/main.go +++ /dev/null @@ -1,98 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "time" - - "github.com/mesos/mesos-go/detector" - "github.com/mesosphere/mesos-dns/detect" - "github.com/mesosphere/mesos-dns/logging" - "github.com/mesosphere/mesos-dns/records" - "github.com/mesosphere/mesos-dns/resolver" - "github.com/mesosphere/mesos-dns/util" -) - -func main() { - util.PanicHandlers = append(util.PanicHandlers, func(_ interface{}) { - // by default the handler already logs the panic - os.Exit(1) - }) - - var versionFlag bool - - // parse flags - cjson := flag.String("config", "config.json", "path to config file (json)") - flag.BoolVar(&versionFlag, "version", false, "output the version") - flag.Parse() - - // -version - if versionFlag { - fmt.Println(Version) - os.Exit(0) - } - - // initialize logging - logging.SetupLogs() - - // initialize resolver - config := records.SetConfig(*cjson) - res := resolver.New(Version, config) - errch := make(chan error) - - // launch DNS server - if config.DNSOn { - go func() { errch <- <-res.LaunchDNS() }() - } - - // launch HTTP server - if config.HTTPOn { - go func() { errch <- <-res.LaunchHTTP() }() - } - - changed := detectMasters(config.Zk, config.Masters) - reload := time.NewTicker(time.Second * time.Duration(config.RefreshSeconds)) - zkTimeout := time.Second * time.Duration(config.ZkDetectionTimeout) - timeout := time.AfterFunc(zkTimeout, func() { - if zkTimeout > 0 { - errch <- fmt.Errorf("master detection timed out after %s", zkTimeout) - } - }) - - defer reload.Stop() - defer util.HandleCrash() - for { - select { - case <-reload.C: - res.Reload() - case masters := <-changed: - if len(masters) == 0 || masters[0] == "" { // no leader - timeout.Reset(zkTimeout) - } else { - timeout.Stop() - } - logging.VeryVerbose.Printf("new masters detected: %v", masters) - res.SetMasters(masters) - res.Reload() - case err := <-errch: - logging.Error.Fatal(err) - } - } -} - -func detectMasters(zk string, masters []string) <-chan []string { - changed := make(chan []string, 1) - if zk != "" { - logging.Verbose.Println("Starting master detector for ZK ", zk) - if md, err := detector.New(zk); err != nil { - log.Fatalf("failed to create master detector: %v", err) - } else if err := md.Detect(detect.NewMasters(masters, changed)); err != nil { - log.Fatalf("failed to initialize master detector: %v", err) - } - } else { - changed <- masters - } - return changed -} diff --git a/vendor/github.com/mesosphere/mesos-dns/version.go b/vendor/github.com/mesosphere/mesos-dns/version.go deleted file mode 100644 index ba652d28c..000000000 --- a/vendor/github.com/mesosphere/mesos-dns/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -// Version is the Mesos-DNS version string, set at build time. 
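//
// (Editorial note: such a variable is typically stamped by the linker, e.g.
//
//	go build -ldflags "-X main.Version=v0.6.0" .
//
// where v0.6.0 is a hypothetical release tag.)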
-var Version = "dev" diff --git a/vendor/github.com/opencontainers/runc/checkpoint.go b/vendor/github.com/opencontainers/runc/checkpoint.go deleted file mode 100644 index 9b5663f37..000000000 --- a/vendor/github.com/opencontainers/runc/checkpoint.go +++ /dev/null @@ -1,132 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "strconv" - "strings" - "syscall" - - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -var checkpointCommand = cli.Command{ - Name: "checkpoint", - Usage: "checkpoint a running container", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -checkpointed.`, - Description: `The checkpoint command saves the state of the container instance.`, - Flags: []cli.Flag{ - cli.StringFlag{Name: "image-path", Value: "", Usage: "path for saving criu image files"}, - cli.StringFlag{Name: "work-path", Value: "", Usage: "path for saving work files and logs"}, - cli.StringFlag{Name: "parent-path", Value: "", Usage: "path for previous criu image files in pre-dump"}, - cli.BoolFlag{Name: "leave-running", Usage: "leave the process running after checkpointing"}, - cli.BoolFlag{Name: "tcp-established", Usage: "allow open tcp connections"}, - cli.BoolFlag{Name: "ext-unix-sk", Usage: "allow external unix sockets"}, - cli.BoolFlag{Name: "shell-job", Usage: "allow shell jobs"}, - cli.StringFlag{Name: "page-server", Value: "", Usage: "ADDRESS:PORT of the page server"}, - cli.BoolFlag{Name: "file-locks", Usage: "handle file locks, for safety"}, - cli.BoolFlag{Name: "pre-dump", Usage: "dump container's memory information only, leave the container running after this"}, - cli.StringFlag{Name: "manage-cgroups-mode", Value: "", Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'"}, - cli.StringSliceFlag{Name: "empty-ns", Usage: "create a namespace, but don't restore its properties"}, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - // XXX: Currently this is untested with rootless containers. 
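		// (Editorial note: this command is invoked as, e.g.,
		//   runc checkpoint --image-path ./checkpoint --leave-running <container-id>
		// using the flags declared above; the id is whatever was given to
		// `runc create`.)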
- if isRootless() { - return fmt.Errorf("runc checkpoint requires root") - } - - container, err := getContainer(context) - if err != nil { - return err - } - status, err := container.Status() - if err != nil { - return err - } - if status == libcontainer.Created { - fatalf("Container cannot be checkpointed in created state") - } - defer destroy(container) - options := criuOptions(context) - // these are the mandatory criu options for a container - setPageServer(context, options) - setManageCgroupsMode(context, options) - if err := setEmptyNsMask(context, options); err != nil { - return err - } - if err := container.Checkpoint(options); err != nil { - return err - } - return nil - }, -} - -func getCheckpointImagePath(context *cli.Context) string { - imagePath := context.String("image-path") - if imagePath == "" { - imagePath = getDefaultImagePath(context) - } - return imagePath -} - -func setPageServer(context *cli.Context, options *libcontainer.CriuOpts) { - // xxx following criu opts are optional - // The dump image can be sent to a criu page server - if psOpt := context.String("page-server"); psOpt != "" { - addressPort := strings.Split(psOpt, ":") - if len(addressPort) != 2 { - fatal(fmt.Errorf("Use --page-server ADDRESS:PORT to specify page server")) - } - portInt, err := strconv.Atoi(addressPort[1]) - if err != nil { - fatal(fmt.Errorf("Invalid port number")) - } - options.PageServer = libcontainer.CriuPageServerInfo{ - Address: addressPort[0], - Port: int32(portInt), - } - } -} - -func setManageCgroupsMode(context *cli.Context, options *libcontainer.CriuOpts) { - if cgOpt := context.String("manage-cgroups-mode"); cgOpt != "" { - switch cgOpt { - case "soft": - options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_SOFT - case "full": - options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_FULL - case "strict": - options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_STRICT - default: - fatal(fmt.Errorf("Invalid manage cgroups mode")) - } - } -} - -var namespaceMapping = map[specs.LinuxNamespaceType]int{ - specs.NetworkNamespace: syscall.CLONE_NEWNET, -} - -func setEmptyNsMask(context *cli.Context, options *libcontainer.CriuOpts) error { - var nsmask int - - for _, ns := range context.StringSlice("empty-ns") { - f, exists := namespaceMapping[specs.LinuxNamespaceType(ns)] - if !exists { - return fmt.Errorf("namespace %q is not supported", ns) - } - nsmask |= f - } - - options.EmptyNs = uint32(nsmask) - return nil -} diff --git a/vendor/github.com/opencontainers/runc/create.go b/vendor/github.com/opencontainers/runc/create.go deleted file mode 100644 index 096e8e335..000000000 --- a/vendor/github.com/opencontainers/runc/create.go +++ /dev/null @@ -1,74 +0,0 @@ -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -var createCommand = cli.Command{ - Name: "create", - Usage: "create a container", - ArgsUsage: ` - -Where "" is your name for the instance of the container that you -are starting. The name you provide for the container instance must be unique on -your host.`, - Description: `The create command creates an instance of a container for a bundle. The bundle -is a directory with a specification file named "` + specConfig + `" and a root -filesystem. - -The specification file includes an args parameter. The args parameter is used -to specify command(s) that get run when the container is started. To change the -command(s) that get executed on start, edit the args parameter of the spec. 
See -"runc spec --help" for more explanation.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "bundle, b", - Value: "", - Usage: `path to the root of the bundle directory, defaults to the current directory`, - }, - cli.StringFlag{ - Name: "console-socket", - Value: "", - Usage: "path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal", - }, - cli.StringFlag{ - Name: "pid-file", - Value: "", - Usage: "specify the file to write the process id to", - }, - cli.BoolFlag{ - Name: "no-pivot", - Usage: "do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk", - }, - cli.BoolFlag{ - Name: "no-new-keyring", - Usage: "do not create a new session keyring for the container. This will cause the container to inherit the calling processes session key", - }, - cli.IntFlag{ - Name: "preserve-fds", - Usage: "Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total)", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - if err := revisePidFile(context); err != nil { - return err - } - spec, err := setupSpec(context) - if err != nil { - return err - } - status, err := startContainer(context, spec, true) - if err != nil { - return err - } - // exit with the container's exit status so any external supervisor is - // notified of the exit with the correct exit status. - os.Exit(status) - return nil - }, -} diff --git a/vendor/github.com/opencontainers/runc/delete.go b/vendor/github.com/opencontainers/runc/delete.go deleted file mode 100644 index b43dcaa7c..000000000 --- a/vendor/github.com/opencontainers/runc/delete.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build !solaris - -package main - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" - - "github.com/opencontainers/runc/libcontainer" - "github.com/urfave/cli" -) - -func killContainer(container libcontainer.Container) error { - _ = container.Signal(syscall.SIGKILL, false) - for i := 0; i < 100; i++ { - time.Sleep(100 * time.Millisecond) - if err := container.Signal(syscall.Signal(0), false); err != nil { - destroy(container) - return nil - } - } - return fmt.Errorf("container init still running") -} - -var deleteCommand = cli.Command{ - Name: "delete", - Usage: "delete any resources held by the container often used with detached container", - ArgsUsage: ` - -Where "" is the name for the instance of the container. - -EXAMPLE: -For example, if the container id is "ubuntu01" and runc list currently shows the -status of "ubuntu01" as "stopped" the following will delete resources held for -"ubuntu01" removing "ubuntu01" from the runc list of containers: - - # runc delete ubuntu01`, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "force, f", - Usage: "Forcibly deletes the container if it is still running (uses SIGKILL)", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - - id := context.Args().First() - container, err := getContainer(context) - if err != nil { - if lerr, ok := err.(libcontainer.Error); ok && lerr.Code() == libcontainer.ContainerNotExists { - // if there was an aborted start or something of the sort then the container's directory could exist but - // libcontainer does not see it because the state.json file inside that directory was never created. 
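The `killContainer` helper in the hunk above captures a common Unix liveness idiom: send SIGKILL, then poll with signal 0, which performs existence and permission checks without delivering any signal. A minimal standalone sketch of the same pattern (the `waitKilled` name and the plain-PID argument are illustrative, not part of runc):

```go
package main

import (
	"fmt"
	"syscall"
	"time"
)

// waitKilled sends SIGKILL to pid and then polls with signal 0 (an
// existence probe that delivers nothing) until the process is gone
// or the timeout elapses, mirroring the loop in killContainer.
func waitKilled(pid int, timeout time.Duration) error {
	_ = syscall.Kill(pid, syscall.SIGKILL)
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		time.Sleep(100 * time.Millisecond)
		// Kill with signal 0 fails with ESRCH once the process no longer exists.
		if err := syscall.Kill(pid, syscall.Signal(0)); err != nil {
			return nil
		}
	}
	return fmt.Errorf("process %d still running after SIGKILL", pid)
}
```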
- path := filepath.Join(context.GlobalString("root"), id) - if e := os.RemoveAll(path); e != nil { - fmt.Fprintf(os.Stderr, "remove %s: %v\n", path, e) - } - } - return err - } - s, err := container.Status() - if err != nil { - return err - } - switch s { - case libcontainer.Stopped: - destroy(container) - case libcontainer.Created: - return killContainer(container) - default: - if context.Bool("force") { - return killContainer(container) - } else { - return fmt.Errorf("cannot delete container %s that is not stopped: %s\n", id, s) - } - } - - return nil - }, -} diff --git a/vendor/github.com/opencontainers/runc/events.go b/vendor/github.com/opencontainers/runc/events.go deleted file mode 100644 index 6c21e5259..000000000 --- a/vendor/github.com/opencontainers/runc/events.go +++ /dev/null @@ -1,260 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "fmt" - "os" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/urfave/cli" -) - -// event struct for encoding the event data to json. -type event struct { - Type string `json:"type"` - ID string `json:"id"` - Data interface{} `json:"data,omitempty"` -} - -// stats is the runc specific stats structure for stability when encoding and decoding stats. -type stats struct { - CPU cpu `json:"cpu"` - Memory memory `json:"memory"` - Pids pids `json:"pids"` - Blkio blkio `json:"blkio"` - Hugetlb map[string]hugetlb `json:"hugetlb"` -} - -type hugetlb struct { - Usage uint64 `json:"usage,omitempty"` - Max uint64 `json:"max,omitempty"` - Failcnt uint64 `json:"failcnt"` -} - -type blkioEntry struct { - Major uint64 `json:"major,omitempty"` - Minor uint64 `json:"minor,omitempty"` - Op string `json:"op,omitempty"` - Value uint64 `json:"value,omitempty"` -} - -type blkio struct { - IoServiceBytesRecursive []blkioEntry `json:"ioServiceBytesRecursive,omitempty"` - IoServicedRecursive []blkioEntry `json:"ioServicedRecursive,omitempty"` - IoQueuedRecursive []blkioEntry `json:"ioQueueRecursive,omitempty"` - IoServiceTimeRecursive []blkioEntry `json:"ioServiceTimeRecursive,omitempty"` - IoWaitTimeRecursive []blkioEntry `json:"ioWaitTimeRecursive,omitempty"` - IoMergedRecursive []blkioEntry `json:"ioMergedRecursive,omitempty"` - IoTimeRecursive []blkioEntry `json:"ioTimeRecursive,omitempty"` - SectorsRecursive []blkioEntry `json:"sectorsRecursive,omitempty"` -} - -type pids struct { - Current uint64 `json:"current,omitempty"` - Limit uint64 `json:"limit,omitempty"` -} - -type throttling struct { - Periods uint64 `json:"periods,omitempty"` - ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` - ThrottledTime uint64 `json:"throttledTime,omitempty"` -} - -type cpuUsage struct { - // Units: nanoseconds. 
-	Total uint64 `json:"total,omitempty"`
-	Percpu []uint64 `json:"percpu,omitempty"`
-	Kernel uint64 `json:"kernel"`
-	User uint64 `json:"user"`
-}
-
-type cpu struct {
-	Usage cpuUsage `json:"usage,omitempty"`
-	Throttling throttling `json:"throttling,omitempty"`
-}
-
-type memoryEntry struct {
-	Limit uint64 `json:"limit"`
-	Usage uint64 `json:"usage,omitempty"`
-	Max uint64 `json:"max,omitempty"`
-	Failcnt uint64 `json:"failcnt"`
-}
-
-type memory struct {
-	Cache uint64 `json:"cache,omitempty"`
-	Usage memoryEntry `json:"usage,omitempty"`
-	Swap memoryEntry `json:"swap,omitempty"`
-	Kernel memoryEntry `json:"kernel,omitempty"`
-	KernelTCP memoryEntry `json:"kernelTCP,omitempty"`
-	Raw map[string]uint64 `json:"raw,omitempty"`
-}
-
-var eventsCommand = cli.Command{
-	Name: "events",
-	Usage: "display container events such as OOM notifications, cpu, memory, and IO usage statistics",
-	ArgsUsage: `<container-id>
-
-Where "<container-id>" is the name for the instance of the container.`,
-	Description: `The events command displays information about the container. By default the
-information is displayed once every 5 seconds.`,
-	Flags: []cli.Flag{
-		cli.DurationFlag{Name: "interval", Value: 5 * time.Second, Usage: "set the stats collection interval"},
-		cli.BoolFlag{Name: "stats", Usage: "display the container's stats then exit"},
-	},
-	Action: func(context *cli.Context) error {
-		if err := checkArgs(context, 1, exactArgs); err != nil {
-			return err
-		}
-		container, err := getContainer(context)
-		if err != nil {
-			return err
-		}
-		duration := context.Duration("interval")
-		if duration <= 0 {
-			return fmt.Errorf("duration interval must be greater than 0")
-		}
-		status, err := container.Status()
-		if err != nil {
-			return err
-		}
-		if status == libcontainer.Stopped {
-			return fmt.Errorf("container with id %s is not running", container.ID())
-		}
-		var (
-			stats = make(chan *libcontainer.Stats, 1)
-			events = make(chan *event, 1024)
-			group = &sync.WaitGroup{}
-		)
-		group.Add(1)
-		go func() {
-			defer group.Done()
-			enc := json.NewEncoder(os.Stdout)
-			for e := range events {
-				if err := enc.Encode(e); err != nil {
-					logrus.Error(err)
-				}
-			}
-		}()
-		if context.Bool("stats") {
-			s, err := container.Stats()
-			if err != nil {
-				return err
-			}
-			events <- &event{Type: "stats", ID: container.ID(), Data: convertLibcontainerStats(s)}
-			close(events)
-			group.Wait()
-			return nil
-		}
-		go func() {
-			for range time.Tick(context.Duration("interval")) {
-				s, err := container.Stats()
-				if err != nil {
-					logrus.Error(err)
-					continue
-				}
-				stats <- s
-			}
-		}()
-		n, err := container.NotifyOOM()
-		if err != nil {
-			return err
-		}
-		for {
-			select {
-			case _, ok := <-n:
-				if ok {
-					// this means an oom event was received, if it is !ok then
-					// the channel was closed because the container stopped and
-					// the cgroups no longer exist.
-					events <- &event{Type: "oom", ID: container.ID()}
-				} else {
-					n = nil
-				}
-			case s := <-stats:
-				events <- &event{Type: "stats", ID: container.ID(), Data: convertLibcontainerStats(s)}
-			}
-			if n == nil {
-				close(events)
-				break
-			}
-		}
-		group.Wait()
-		return nil
-	},
-}
-
-func convertLibcontainerStats(ls *libcontainer.Stats) *stats {
-	cg := ls.CgroupStats
-	if cg == nil {
-		return nil
-	}
-	var s stats
-	s.Pids.Current = cg.PidsStats.Current
-	s.Pids.Limit = cg.PidsStats.Limit
-
-	s.CPU.Usage.Kernel = cg.CpuStats.CpuUsage.UsageInKernelmode
-	s.CPU.Usage.User = cg.CpuStats.CpuUsage.UsageInUsermode
-	s.CPU.Usage.Total = cg.CpuStats.CpuUsage.TotalUsage
-	s.CPU.Usage.Percpu = cg.CpuStats.CpuUsage.PercpuUsage
-	s.CPU.Throttling.Periods = cg.CpuStats.ThrottlingData.Periods
-	s.CPU.Throttling.ThrottledPeriods = cg.CpuStats.ThrottlingData.ThrottledPeriods
-	s.CPU.Throttling.ThrottledTime = cg.CpuStats.ThrottlingData.ThrottledTime
-
-	s.Memory.Cache = cg.MemoryStats.Cache
-	s.Memory.Kernel = convertMemoryEntry(cg.MemoryStats.KernelUsage)
-	s.Memory.KernelTCP = convertMemoryEntry(cg.MemoryStats.KernelTCPUsage)
-	s.Memory.Swap = convertMemoryEntry(cg.MemoryStats.SwapUsage)
-	s.Memory.Usage = convertMemoryEntry(cg.MemoryStats.Usage)
-	s.Memory.Raw = cg.MemoryStats.Stats
-
-	s.Blkio.IoServiceBytesRecursive = convertBlkioEntry(cg.BlkioStats.IoServiceBytesRecursive)
-	s.Blkio.IoServicedRecursive = convertBlkioEntry(cg.BlkioStats.IoServicedRecursive)
-	s.Blkio.IoQueuedRecursive = convertBlkioEntry(cg.BlkioStats.IoQueuedRecursive)
-	s.Blkio.IoServiceTimeRecursive = convertBlkioEntry(cg.BlkioStats.IoServiceTimeRecursive)
-	s.Blkio.IoWaitTimeRecursive = convertBlkioEntry(cg.BlkioStats.IoWaitTimeRecursive)
-	s.Blkio.IoMergedRecursive = convertBlkioEntry(cg.BlkioStats.IoMergedRecursive)
-	s.Blkio.IoTimeRecursive = convertBlkioEntry(cg.BlkioStats.IoTimeRecursive)
-	s.Blkio.SectorsRecursive = convertBlkioEntry(cg.BlkioStats.SectorsRecursive)
-
-	s.Hugetlb = make(map[string]hugetlb)
-	for k, v := range cg.HugetlbStats {
-		s.Hugetlb[k] = convertHugtlb(v)
-	}
-	return &s
-}
-
-func convertHugtlb(c cgroups.HugetlbStats) hugetlb {
-	return hugetlb{
-		Usage: c.Usage,
-		Max: c.MaxUsage,
-		Failcnt: c.Failcnt,
-	}
-}
-
-func convertMemoryEntry(c cgroups.MemoryData) memoryEntry {
-	return memoryEntry{
-		Limit: c.Limit,
-		Usage: c.Usage,
-		Max: c.MaxUsage,
-		Failcnt: c.Failcnt,
-	}
-}
-
-func convertBlkioEntry(c []cgroups.BlkioStatEntry) []blkioEntry {
-	var out []blkioEntry
-	for _, e := range c {
-		out = append(out, blkioEntry{
-			Major: e.Major,
-			Minor: e.Minor,
-			Op: e.Op,
-			Value: e.Value,
-		})
-	}
-	return out
-}
diff --git a/vendor/github.com/opencontainers/runc/exec.go b/vendor/github.com/opencontainers/runc/exec.go
deleted file mode 100644
index 22f2689ab..000000000
--- a/vendor/github.com/opencontainers/runc/exec.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// +build linux
-
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"strconv"
-	"strings"
-
-	"github.com/opencontainers/runc/libcontainer"
-	"github.com/opencontainers/runc/libcontainer/utils"
-	"github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/urfave/cli"
-)
-
-var execCommand = cli.Command{
-	Name: "exec",
-	Usage: "execute new process inside the container",
-	ArgsUsage: `<container-id> <command> [command options] || -p process.json <container-id>
-
-Where "<container-id>" is the name for the instance of the container and
-"<command>" is the command to be executed in the container.
-"<command>" can't be empty unless a "-p" flag provided.
-
-EXAMPLE:
-For example, if the container is configured to run the linux ps command the
-following will output a list of processes running in the container:
-
-       # runc exec <container-id> ps`,
-	Flags: []cli.Flag{
-		cli.StringFlag{
-			Name: "console-socket",
-			Usage: "path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal",
-		},
-		cli.StringFlag{
-			Name: "cwd",
-			Usage: "current working directory in the container",
-		},
-		cli.StringSliceFlag{
-			Name: "env, e",
-			Usage: "set environment variables",
-		},
-		cli.BoolFlag{
-			Name: "tty, t",
-			Usage: "allocate a pseudo-TTY",
-		},
-		cli.StringFlag{
-			Name: "user, u",
-			Usage: "UID (format: <uid>[:<gid>])",
-		},
-		cli.StringFlag{
-			Name: "process, p",
-			Usage: "path to the process.json",
-		},
-		cli.BoolFlag{
-			Name: "detach,d",
-			Usage: "detach from the container's process",
-		},
-		cli.StringFlag{
-			Name: "pid-file",
-			Value: "",
-			Usage: "specify the file to write the process id to",
-		},
-		cli.StringFlag{
-			Name: "process-label",
-			Usage: "set the asm process label for the process commonly used with selinux",
-		},
-		cli.StringFlag{
-			Name: "apparmor",
-			Usage: "set the apparmor profile for the process",
-		},
-		cli.BoolFlag{
-			Name: "no-new-privs",
-			Usage: "set the no new privileges value for the process",
-		},
-		cli.StringSliceFlag{
-			Name: "cap, c",
-			Value: &cli.StringSlice{},
-			Usage: "add a capability to the bounding set for the process",
-		},
-		cli.BoolFlag{
-			Name: "no-subreaper",
-			Usage: "disable the use of the subreaper used to reap reparented processes",
-			Hidden: true,
-		},
-	},
-	Action: func(context *cli.Context) error {
-		if err := checkArgs(context, 1, minArgs); err != nil {
-			return err
-		}
-		if err := revisePidFile(context); err != nil {
-			return err
-		}
-		status, err := execProcess(context)
-		if err == nil {
-			os.Exit(status)
-		}
-		return fmt.Errorf("exec failed: %v", err)
-	},
-	SkipArgReorder: true,
-}
-
-func execProcess(context *cli.Context) (int, error) {
-	container, err := getContainer(context)
-	if err != nil {
-		return -1, err
-	}
-	status, err := container.Status()
-	if err != nil {
-		return -1, err
-	}
-	if status == libcontainer.Stopped {
-		return -1, fmt.Errorf("cannot exec a container that has stopped")
-	}
-	path := context.String("process")
-	if path == "" && len(context.Args()) == 1 {
-		return -1, fmt.Errorf("process args cannot be empty")
-	}
-	detach := context.Bool("detach")
-	state, err := container.State()
-	if err != nil {
-		return -1, err
-	}
-	bundle := utils.SearchLabels(state.Config.Labels, "bundle")
-	p, err := getProcess(context, bundle)
-	if err != nil {
-		return -1, err
-	}
-	r := &runner{
-		enableSubreaper: false,
-		shouldDestroy: false,
-		container: container,
-		consoleSocket: context.String("console-socket"),
-		detach: detach,
-		pidFile: context.String("pid-file"),
-	}
-	return r.run(p)
-}
-
-func getProcess(context *cli.Context, bundle string) (*specs.Process, error) {
-	if path := context.String("process"); path != "" {
-		f, err := os.Open(path)
-		if err != nil {
-			return nil, err
-		}
-		defer f.Close()
-		var p specs.Process
-		if err := json.NewDecoder(f).Decode(&p); err != nil {
-			return nil, err
-		}
-		return &p, validateProcessSpec(&p)
-	}
-	// process via cli flags
-	if err := os.Chdir(bundle); err != nil {
-		return nil, err
-	}
-	spec, err := loadSpec(specConfig)
-	if err != nil {
-		return nil, err
-	}
-	p := spec.Process
-	p.Args = context.Args()[1:]
-	// override the cwd, if passed
-	if context.String("cwd") != "" {
-		p.Cwd = context.String("cwd")
-	}
-	if ap := context.String("apparmor"); ap != "" {
-		p.ApparmorProfile = ap
-	}
-	if l := context.String("process-label"); l != "" {
-		p.SelinuxLabel = l
-	}
-	if caps := context.StringSlice("cap"); len(caps) > 0 {
-		for _, c := range caps {
-			p.Capabilities.Bounding = append(p.Capabilities.Bounding, c)
-			p.Capabilities.Inheritable = append(p.Capabilities.Inheritable, c)
-			p.Capabilities.Effective = append(p.Capabilities.Effective, c)
-			p.Capabilities.Permitted = append(p.Capabilities.Permitted, c)
-			p.Capabilities.Ambient = append(p.Capabilities.Ambient, c)
-		}
-	}
-	// append the passed env variables
-	p.Env = append(p.Env, context.StringSlice("env")...)
-
-	// set the tty
-	if context.IsSet("tty") {
-		p.Terminal = context.Bool("tty")
-	}
-	if context.IsSet("no-new-privs") {
-		p.NoNewPrivileges = context.Bool("no-new-privs")
-	}
-	// override the user, if passed
-	if context.String("user") != "" {
-		u := strings.SplitN(context.String("user"), ":", 2)
-		if len(u) > 1 {
-			gid, err := strconv.Atoi(u[1])
-			if err != nil {
-				return nil, fmt.Errorf("parsing %s as int for gid failed: %v", u[1], err)
-			}
-			p.User.GID = uint32(gid)
-		}
-		uid, err := strconv.Atoi(u[0])
-		if err != nil {
-			return nil, fmt.Errorf("parsing %s as int for uid failed: %v", u[0], err)
-		}
-		p.User.UID = uint32(uid)
-	}
-	return &p, nil
-}
diff --git a/vendor/github.com/opencontainers/runc/kill.go b/vendor/github.com/opencontainers/runc/kill.go
deleted file mode 100644
index f80c4db78..000000000
--- a/vendor/github.com/opencontainers/runc/kill.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// +build linux
-
-package main
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"syscall"
-
-	"github.com/urfave/cli"
-)
-
-var signalMap = map[string]syscall.Signal{
-	"ABRT": syscall.SIGABRT,
-	"ALRM": syscall.SIGALRM,
-	"BUS": syscall.SIGBUS,
-	"CHLD": syscall.SIGCHLD,
-	"CLD": syscall.SIGCLD,
-	"CONT": syscall.SIGCONT,
-	"FPE": syscall.SIGFPE,
-	"HUP": syscall.SIGHUP,
-	"ILL": syscall.SIGILL,
-	"INT": syscall.SIGINT,
-	"IO": syscall.SIGIO,
-	"IOT": syscall.SIGIOT,
-	"KILL": syscall.SIGKILL,
-	"PIPE": syscall.SIGPIPE,
-	"POLL": syscall.SIGPOLL,
-	"PROF": syscall.SIGPROF,
-	"PWR": syscall.SIGPWR,
-	"QUIT": syscall.SIGQUIT,
-	"SEGV": syscall.SIGSEGV,
-	"STKFLT": syscall.SIGSTKFLT,
-	"STOP": syscall.SIGSTOP,
-	"SYS": syscall.SIGSYS,
-	"TERM": syscall.SIGTERM,
-	"TRAP": syscall.SIGTRAP,
-	"TSTP": syscall.SIGTSTP,
-	"TTIN": syscall.SIGTTIN,
-	"TTOU": syscall.SIGTTOU,
-	"UNUSED": syscall.SIGUNUSED,
-	"URG": syscall.SIGURG,
-	"USR1": syscall.SIGUSR1,
-	"USR2": syscall.SIGUSR2,
-	"VTALRM": syscall.SIGVTALRM,
-	"WINCH": syscall.SIGWINCH,
-	"XCPU": syscall.SIGXCPU,
-	"XFSZ": syscall.SIGXFSZ,
-}
-
-var killCommand = cli.Command{
-	Name: "kill",
-	Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process",
-	ArgsUsage: `<container-id> [signal]
-
-Where "<container-id>" is the name for the instance of the container and
-"[signal]" is the signal to be sent to the init process.
- -EXAMPLE: -For example, if the container id is "ubuntu01" the following will send a "KILL" -signal to the init process of the "ubuntu01" container: - - # runc kill ubuntu01 KILL`, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "all, a", - Usage: "send the specified signal to all processes inside the container", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, minArgs); err != nil { - return err - } - if err := checkArgs(context, 2, maxArgs); err != nil { - return err - } - container, err := getContainer(context) - if err != nil { - return err - } - - sigstr := context.Args().Get(1) - if sigstr == "" { - sigstr = "SIGTERM" - } - - signal, err := parseSignal(sigstr) - if err != nil { - return err - } - if err := container.Signal(signal, context.Bool("all")); err != nil { - return err - } - return nil - }, -} - -func parseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - sig := syscall.Signal(s) - for _, msig := range signalMap { - if sig == msig { - return sig, nil - } - } - return -1, fmt.Errorf("unknown signal %q", rawSignal) - } - signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("unknown signal %q", rawSignal) - } - return signal, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go deleted file mode 100644 index 8981b2a2f..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - "strings" - - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/syndtr/gocapability/capability" -) - -const allCapabilityTypes = capability.CAPS | capability.BOUNDS | capability.AMBS - -var capabilityMap map[string]capability.Cap - -func init() { - capabilityMap = make(map[string]capability.Cap) - last := capability.CAP_LAST_CAP - // workaround for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND - } - for _, cap := range capability.List() { - if cap > last { - continue - } - capKey := fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())) - capabilityMap[capKey] = cap - } -} - -func newContainerCapList(capConfig *configs.Capabilities) (*containerCapabilities, error) { - bounding := []capability.Cap{} - for _, c := range capConfig.Bounding { - v, ok := capabilityMap[c] - if !ok { - return nil, fmt.Errorf("unknown capability %q", c) - } - bounding = append(bounding, v) - } - effective := []capability.Cap{} - for _, c := range capConfig.Effective { - v, ok := capabilityMap[c] - if !ok { - return nil, fmt.Errorf("unknown capability %q", c) - } - effective = append(effective, v) - } - inheritable := []capability.Cap{} - for _, c := range capConfig.Inheritable { - v, ok := capabilityMap[c] - if !ok { - return nil, fmt.Errorf("unknown capability %q", c) - } - inheritable = append(inheritable, v) - } - permitted := []capability.Cap{} - for _, c := range capConfig.Permitted { - v, ok := capabilityMap[c] - if !ok { - return nil, fmt.Errorf("unknown capability %q", c) - } - permitted = append(permitted, v) - } - ambient := []capability.Cap{} - for _, c := range capConfig.Ambient { - v, ok := capabilityMap[c] - if !ok { - return nil, fmt.Errorf("unknown capability %q", c) - } - ambient = append(ambient, v) - } - pid, err := 
capability.NewPid(os.Getpid()) - if err != nil { - return nil, err - } - return &containerCapabilities{ - bounding: bounding, - effective: effective, - inheritable: inheritable, - permitted: permitted, - ambient: ambient, - pid: pid, - }, nil -} - -type containerCapabilities struct { - pid capability.Capabilities - bounding []capability.Cap - effective []capability.Cap - inheritable []capability.Cap - permitted []capability.Cap - ambient []capability.Cap -} - -// ApplyBoundingSet sets the capability bounding set to those specified in the whitelist. -func (c *containerCapabilities) ApplyBoundingSet() error { - c.pid.Clear(capability.BOUNDS) - c.pid.Set(capability.BOUNDS, c.bounding...) - return c.pid.Apply(capability.BOUNDS) -} - -// Apply sets all the capabilities for the current process in the config. -func (c *containerCapabilities) ApplyCaps() error { - c.pid.Clear(allCapabilityTypes) - c.pid.Set(capability.BOUNDS, c.bounding...) - c.pid.Set(capability.PERMITTED, c.permitted...) - c.pid.Set(capability.INHERITABLE, c.inheritable...) - c.pid.Set(capability.EFFECTIVE, c.effective...) - c.pid.Set(capability.AMBIENT, c.ambient...) - return c.pid.Apply(allCapabilityTypes) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go deleted file mode 100644 index c7bdf1f60..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux,!go1.5 - -package libcontainer - -import "syscall" - -// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building -// with earlier versions -func enableSetgroups(sys *syscall.SysProcAttr) { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console.go b/vendor/github.com/opencontainers/runc/libcontainer/console.go deleted file mode 100644 index 917acc702..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console.go +++ /dev/null @@ -1,17 +0,0 @@ -package libcontainer - -import ( - "io" - "os" -) - -// Console represents a pseudo TTY. -type Console interface { - io.ReadWriteCloser - - // Path returns the filesystem path to the slave side of the pty. - Path() string - - // Fd returns the fd for the master of the pty. - File() *os.File -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go deleted file mode 100644 index b7166a31f..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build freebsd - -package libcontainer - -import ( - "errors" -) - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. 
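The `compat_1.5_linux.go` stub above shows how runc gates code on toolchain releases: the `linux,!go1.5` build tag compiles a no-op on compilers that predate the `GidMappingsEnableSetgroups` field. A hedged sketch of what the complementary go1.5+ half of such a pair could look like (hypothetical; runc's actual counterpart may be structured differently):

```go
// +build linux,go1.5

package libcontainer

import "syscall"

// enableSetgroups is the go1.5+ half of the pair: on these toolchains
// the GidMappingsEnableSetgroups field exists, so it can be set.
// Hypothetical mirror of the compat_1.5_linux.go stub shown above.
func enableSetgroups(sys *syscall.SysProcAttr) {
	sys.GidMappingsEnableSetgroups = true
}
```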
-func newConsole() (Console, error) { - return nil, errors.New("libcontainer console is not supported on FreeBSD") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go deleted file mode 100644 index 5e364a88a..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go +++ /dev/null @@ -1,157 +0,0 @@ -package libcontainer - -import ( - "fmt" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -func ConsoleFromFile(f *os.File) Console { - return &linuxConsole{ - master: f, - } -} - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func newConsole() (Console, error) { - master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - if err := saneTerminal(master); err != nil { - return nil, err - } - console, err := ptsname(master) - if err != nil { - return nil, err - } - if err := unlockpt(master); err != nil { - return nil, err - } - return &linuxConsole{ - slavePath: console, - master: master, - }, nil -} - -// linuxConsole is a linux pseudo TTY for use within a container. -type linuxConsole struct { - master *os.File - slavePath string -} - -func (c *linuxConsole) File() *os.File { - return c.master -} - -func (c *linuxConsole) Path() string { - return c.slavePath -} - -func (c *linuxConsole) Read(b []byte) (int, error) { - return c.master.Read(b) -} - -func (c *linuxConsole) Write(b []byte) (int, error) { - return c.master.Write(b) -} - -func (c *linuxConsole) Close() error { - if m := c.master; m != nil { - return m.Close() - } - return nil -} - -// mount initializes the console inside the rootfs mounting with the specified mount label -// and applying the correct ownership of the console. -func (c *linuxConsole) mount() error { - oldMask := unix.Umask(0000) - defer unix.Umask(oldMask) - f, err := os.Create("/dev/console") - if err != nil && !os.IsExist(err) { - return err - } - if f != nil { - f.Close() - } - return unix.Mount(c.slavePath, "/dev/console", "bind", unix.MS_BIND, "") -} - -// dupStdio opens the slavePath for the console and dups the fds to the current -// processes stdio, fd 0,1,2. -func (c *linuxConsole) dupStdio() error { - slave, err := c.open(unix.O_RDWR) - if err != nil { - return err - } - fd := int(slave.Fd()) - for _, i := range []int{0, 1, 2} { - if err := unix.Dup3(fd, i, 0); err != nil { - return err - } - } - return nil -} - -// open is a clone of os.OpenFile without the O_CLOEXEC used to open the pty slave. -func (c *linuxConsole) open(flag int) (*os.File, error) { - r, e := unix.Open(c.slavePath, flag, 0) - if e != nil { - return nil, &os.PathError{ - Op: "open", - Path: c.slavePath, - Err: e, - } - } - return os.NewFile(uintptr(r), c.slavePath), nil -} - -func ioctl(fd uintptr, flag, data uintptr) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { - return err - } - return nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) -} - -// ptsname retrieves the name of the first available pts for the given master. 
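The Linux `newConsole` above strings together the classic pty handshake: open `/dev/ptmx`, unlock the slave with `TIOCSPTLCK` (`unlockpt`), resolve the slave path with `TIOCGPTN` (`ptsname`), then hand out the master. A condensed, self-contained sketch of that sequence (Linux only; `openPtyPair` is an illustrative name, not runc API):

```go
package main

import (
	"fmt"
	"os"
	"unsafe"

	"golang.org/x/sys/unix"
)

// openPtyPair opens the master side of a new pty and derives the
// slave path, following the unlockpt/ptsname sequence used above.
func openPtyPair() (*os.File, string, error) {
	master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
	if err != nil {
		return nil, "", err
	}
	// unlockpt: clear the slave lock via TIOCSPTLCK.
	var u int32
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, master.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); errno != 0 {
		master.Close()
		return nil, "", errno
	}
	// ptsname: read the pty index via TIOCGPTN and build the slave path.
	var n int32
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, master.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); errno != 0 {
		master.Close()
		return nil, "", errno
	}
	return master, fmt.Sprintf("/dev/pts/%d", n), nil
}
```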
-func ptsname(f *os.File) (string, error) { - var n int32 - if err := ioctl(f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} - -// saneTerminal sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts normally. In particular, a not-very-well-known default of -// Linux unix98 ptys is that they have +onlcr by default. While this isn't a -// problem for terminal emulators, because we relay data from the terminal we -// also relay that funky line discipline. -func saneTerminal(terminal *os.File) error { - // Go doesn't have a wrapper for any of the termios ioctls. - var termios unix.Termios - - if err := ioctl(terminal.Fd(), unix.TCGETS, uintptr(unsafe.Pointer(&termios))); err != nil { - return fmt.Errorf("ioctl(tty, tcgets): %s", err.Error()) - } - - // Set -onlcr so we don't have to deal with \r. - termios.Oflag &^= unix.ONLCR - - if err := ioctl(terminal.Fd(), unix.TCSETS, uintptr(unsafe.Pointer(&termios))); err != nil { - return fmt.Errorf("ioctl(tty, tcsets): %s", err.Error()) - } - - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go deleted file mode 100644 index e5ca54599..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package libcontainer - -import ( - "errors" -) - -// newConsole returns an initialized console that can be used within a container by copying bytes -// from the master side to the slave that is attached as the tty for the container's init process. -func newConsole() (Console, error) { - return nil, errors.New("libcontainer console is not supported on Solaris") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go deleted file mode 100644 index c61e866a5..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package libcontainer - -// newConsole returns an initialized console that can be used within a container -func newConsole() (Console, error) { - return &windowsConsole{}, nil -} - -// windowsConsole is a Windows pseudo TTY for use within a container. -type windowsConsole struct { -} - -func (c *windowsConsole) Fd() uintptr { - return 0 -} - -func (c *windowsConsole) Path() string { - return "" -} - -func (c *windowsConsole) Read(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Write(b []byte) (int, error) { - return 0, nil -} - -func (c *windowsConsole) Close() error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container.go b/vendor/github.com/opencontainers/runc/libcontainer/container.go deleted file mode 100644 index 3ddb5ec62..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container.go +++ /dev/null @@ -1,166 +0,0 @@ -// Package libcontainer provides a native Go implementation for creating containers -// with namespaces, cgroups, capabilities, and filesystem access controls. -// It allows you to manage the lifecycle of the container performing additional operations -// after the container is created. -package libcontainer - -import ( - "os" - "time" - - "github.com/opencontainers/runc/libcontainer/configs" -) - -// Status is the status of a container. 
-type Status int - -const ( - // Created is the status that denotes the container exists but has not been run yet. - Created Status = iota - // Running is the status that denotes the container exists and is running. - Running - // Pausing is the status that denotes the container exists, it is in the process of being paused. - Pausing - // Paused is the status that denotes the container exists, but all its processes are paused. - Paused - // Stopped is the status that denotes the container does not have a created or running process. - Stopped -) - -func (s Status) String() string { - switch s { - case Created: - return "created" - case Running: - return "running" - case Pausing: - return "pausing" - case Paused: - return "paused" - case Stopped: - return "stopped" - default: - return "unknown" - } -} - -// BaseState represents the platform agnostic pieces relating to a -// running container's state -type BaseState struct { - // ID is the container ID. - ID string `json:"id"` - - // InitProcessPid is the init process id in the parent namespace. - InitProcessPid int `json:"init_process_pid"` - - // InitProcessStartTime is the init process start time in clock cycles since boot time. - InitProcessStartTime string `json:"init_process_start"` - - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` - - // Config is the container's configuration. - Config configs.Config `json:"config"` -} - -// BaseContainer is a libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. BaseContainer includes methods that are platform agnostic. -type BaseContainer interface { - // Returns the ID of the container - ID() string - - // Returns the current status of the container. - // - // errors: - // ContainerNotExists - Container no longer exists, - // Systemerror - System error. - Status() (Status, error) - - // State returns the current container's state information. - // - // errors: - // SystemError - System error. - State() (*State, error) - - // Returns the current config of the container. - Config() configs.Config - - // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. - // - // errors: - // ContainerNotExists - Container no longer exists, - // Systemerror - System error. - // - // Some of the returned PIDs may no longer refer to processes in the Container, unless - // the Container state is PAUSED in which case every PID in the slice is valid. - Processes() ([]int, error) - - // Returns statistics for the container. - // - // errors: - // ContainerNotExists - Container no longer exists, - // Systemerror - System error. - Stats() (*Stats, error) - - // Set resources of container as configured - // - // We can use this to change resources when containers are running. - // - // errors: - // SystemError - System error. - Set(config configs.Config) error - - // Start a process inside the container. Returns error if process fails to - // start. You can track process lifecycle with passed Process structure. - // - // errors: - // ContainerNotExists - Container no longer exists, - // ConfigInvalid - config is invalid, - // ContainerPaused - Container is paused, - // SystemError - System error. - Start(process *Process) (err error) - - // Run immediately starts the process inside the container. Returns error if process - // fails to start. 
It does not block waiting for the exec fifo after start returns but - // opens the fifo after start returns. - // - // errors: - // ContainerNotExists - Container no longer exists, - // ConfigInvalid - config is invalid, - // ContainerPaused - Container is paused, - // SystemError - System error. - Run(process *Process) (err error) - - // Destroys the container, if its in a valid state, after killing any - // remaining running processes. - // - // Any event registrations are removed before the container is destroyed. - // No error is returned if the container is already destroyed. - // - // Running containers must first be stopped using Signal(..). - // Paused containers must first be resumed using Resume(..). - // - // errors: - // ContainerNotStopped - Container is still running, - // ContainerPaused - Container is paused, - // SystemError - System error. - Destroy() error - - // Signal sends the provided signal code to the container's initial process. - // - // If all is specified the signal is sent to all processes in the container - // including the initial process. - // - // errors: - // SystemError - System error. - Signal(s os.Signal, all bool) error - - // Exec signals the container to exec the users process at the end of the init. - // - // errors: - // SystemError - System error. - Exec() error -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go deleted file mode 100644 index b5563d693..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ /dev/null @@ -1,1579 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/golang/protobuf/proto" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/criurpc" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/syndtr/gocapability/capability" - "github.com/vishvananda/netlink/nl" -) - -const stdioFdCount = 3 - -type linuxContainer struct { - id string - root string - config *configs.Config - cgroupManager cgroups.Manager - initArgs []string - initProcess parentProcess - initProcessStartTime string - criuPath string - m sync.Mutex - criuVersion int - state containerState - created time.Time -} - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here - - // Specifies if the container was started under the rootless mode. - Rootless bool `json:"rootless"` - - // Path to all the cgroups setup for a container. Key is cgroup subsystem name - // with the value as the path. - CgroupPaths map[string]string `json:"cgroup_paths"` - - // NamespacePaths are filepaths to the container's namespaces. Key is the namespace type - // with the value as the path. - NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"` - - // Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore - ExternalDescriptors []string `json:"external_descriptors,omitempty"` -} - -// Container is a libcontainer container object. -// -// Each container is thread-safe within the same process. 
Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. -type Container interface { - BaseContainer - - // Methods below here are platform specific - - // Checkpoint checkpoints the running container's state to disk using the criu(8) utility. - // - // errors: - // Systemerror - System error. - Checkpoint(criuOpts *CriuOpts) error - - // Restore restores the checkpointed container to a running state using the criu(8) utility. - // - // errors: - // Systemerror - System error. - Restore(process *Process, criuOpts *CriuOpts) error - - // If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses - // the execution of any user processes. Asynchronously, when the container finished being paused the - // state is changed to PAUSED. - // If the Container state is PAUSED, do nothing. - // - // errors: - // ContainerNotExists - Container no longer exists, - // ContainerNotRunning - Container not running or created, - // Systemerror - System error. - Pause() error - - // If the Container state is PAUSED, resumes the execution of any user processes in the - // Container before setting the Container state to RUNNING. - // If the Container state is RUNNING, do nothing. - // - // errors: - // ContainerNotExists - Container no longer exists, - // ContainerNotPaused - Container is not paused, - // Systemerror - System error. - Resume() error - - // NotifyOOM returns a read-only channel signaling when the container receives an OOM notification. - // - // errors: - // Systemerror - System error. - NotifyOOM() (<-chan struct{}, error) - - // NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level - // - // errors: - // Systemerror - System error. 
- NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) -} - -// ID returns the container's unique ID -func (c *linuxContainer) ID() string { - return c.id -} - -// Config returns the container's configuration -func (c *linuxContainer) Config() configs.Config { - return *c.config -} - -func (c *linuxContainer) Status() (Status, error) { - c.m.Lock() - defer c.m.Unlock() - return c.currentStatus() -} - -func (c *linuxContainer) State() (*State, error) { - c.m.Lock() - defer c.m.Unlock() - return c.currentState() -} - -func (c *linuxContainer) Processes() ([]int, error) { - pids, err := c.cgroupManager.GetAllPids() - if err != nil { - return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups") - } - return pids, nil -} - -func (c *linuxContainer) Stats() (*Stats, error) { - var ( - err error - stats = &Stats{} - ) - if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { - return stats, newSystemErrorWithCause(err, "getting container stats from cgroups") - } - for _, iface := range c.config.Networks { - switch iface.Type { - case "veth": - istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) - if err != nil { - return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName) - } - stats.Interfaces = append(stats.Interfaces, istats) - } - } - return stats, nil -} - -func (c *linuxContainer) Set(config configs.Config) error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - if status == Stopped { - return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning) - } - c.config = &config - return c.cgroupManager.Set(c.config) -} - -func (c *linuxContainer) Start(process *Process) error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - if status == Stopped { - if err := c.createExecFifo(); err != nil { - return err - } - } - if err := c.start(process, status == Stopped); err != nil { - if status == Stopped { - c.deleteExecFifo() - } - return err - } - return nil -} - -func (c *linuxContainer) Run(process *Process) error { - c.m.Lock() - status, err := c.currentStatus() - if err != nil { - c.m.Unlock() - return err - } - c.m.Unlock() - if err := c.Start(process); err != nil { - return err - } - if status == Stopped { - return c.exec() - } - return nil -} - -func (c *linuxContainer) Exec() error { - c.m.Lock() - defer c.m.Unlock() - return c.exec() -} - -func (c *linuxContainer) exec() error { - path := filepath.Join(c.root, execFifoFilename) - f, err := os.OpenFile(path, os.O_RDONLY, 0) - if err != nil { - return newSystemErrorWithCause(err, "open exec fifo for reading") - } - defer f.Close() - data, err := ioutil.ReadAll(f) - if err != nil { - return err - } - if len(data) > 0 { - os.Remove(path) - return nil - } - return fmt.Errorf("cannot start an already running container") -} - -func (c *linuxContainer) start(process *Process, isInit bool) error { - parent, err := c.newParentProcess(process, isInit) - if err != nil { - return newSystemErrorWithCause(err, "creating new parent process") - } - if err := parent.start(); err != nil { - // terminate the process to ensure that it properly is reaped. 
- if err := parent.terminate(); err != nil { - logrus.Warn(err) - } - return newSystemErrorWithCause(err, "starting container process") - } - // generate a timestamp indicating when the container was started - c.created = time.Now().UTC() - if isInit { - c.state = &createdState{ - c: c, - } - state, err := c.updateState(parent) - if err != nil { - return err - } - c.initProcessStartTime = state.InitProcessStartTime - - if c.config.Hooks != nil { - s := configs.HookState{ - Version: c.config.Version, - ID: c.id, - Pid: parent.pid(), - Bundle: utils.SearchLabels(c.config.Labels, "bundle"), - } - for i, hook := range c.config.Hooks.Poststart { - if err := hook.Run(s); err != nil { - if err := parent.terminate(); err != nil { - logrus.Warn(err) - } - return newSystemErrorWithCausef(err, "running poststart hook %d", i) - } - } - } - } else { - c.state = &runningState{ - c: c, - } - } - return nil -} - -func (c *linuxContainer) Signal(s os.Signal, all bool) error { - if all { - return signalAllProcesses(c.cgroupManager, s) - } - if err := c.initProcess.signal(s); err != nil { - return newSystemErrorWithCause(err, "signaling init process") - } - return nil -} - -func (c *linuxContainer) createExecFifo() error { - rootuid, err := c.Config().HostRootUID() - if err != nil { - return err - } - rootgid, err := c.Config().HostRootGID() - if err != nil { - return err - } - - fifoName := filepath.Join(c.root, execFifoFilename) - if _, err := os.Stat(fifoName); err == nil { - return fmt.Errorf("exec fifo %s already exists", fifoName) - } - oldMask := syscall.Umask(0000) - if err := syscall.Mkfifo(fifoName, 0622); err != nil { - syscall.Umask(oldMask) - return err - } - syscall.Umask(oldMask) - if err := os.Chown(fifoName, rootuid, rootgid); err != nil { - return err - } - return nil -} - -func (c *linuxContainer) deleteExecFifo() { - fifoName := filepath.Join(c.root, execFifoFilename) - os.Remove(fifoName) -} - -func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) { - parentPipe, childPipe, err := utils.NewSockPair("init") - if err != nil { - return nil, newSystemErrorWithCause(err, "creating new init pipe") - } - cmd, err := c.commandTemplate(p, childPipe) - if err != nil { - return nil, newSystemErrorWithCause(err, "creating new command template") - } - if !doInit { - return c.newSetnsProcess(p, cmd, parentPipe, childPipe) - } - - // We only set up rootDir if we're not doing a `runc exec`. The reason for - // this is to avoid cases where a racing, unprivileged process inside the - // container can get access to the statedir file descriptor (which would - // allow for container rootfs escape). - rootDir, err := os.Open(c.root) - if err != nil { - return nil, err - } - cmd.ExtraFiles = append(cmd.ExtraFiles, rootDir) - cmd.Env = append(cmd.Env, - fmt.Sprintf("_LIBCONTAINER_STATEDIR=%d", stdioFdCount+len(cmd.ExtraFiles)-1)) - return c.newInitProcess(p, cmd, parentPipe, childPipe, rootDir) -} - -func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) { - cmd := exec.Command(c.initArgs[0], c.initArgs[1:]...) - cmd.Stdin = p.Stdin - cmd.Stdout = p.Stdout - cmd.Stderr = p.Stderr - cmd.Dir = c.config.Rootfs - if cmd.SysProcAttr == nil { - cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...) 
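`commandTemplate` above relies on an `os/exec` guarantee: `cmd.ExtraFiles[i]` becomes file descriptor `stdioFdCount+i` (that is, `3+i`, after stdin/stdout/stderr) in the child, which is why each `_LIBCONTAINER_*` environment variable is computed as `stdioFdCount+len(cmd.ExtraFiles)-1` immediately after the corresponding append. A small sketch of that convention (`_EXAMPLE_PIPE` and the re-exec target are illustrative only):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

const stdioFdCount = 3 // fds 0, 1, 2 are stdin, stdout, stderr

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	cmd := exec.Command("/proc/self/exe", "child") // hypothetical re-exec target
	cmd.ExtraFiles = append(cmd.ExtraFiles, w)
	// ExtraFiles[i] shows up in the child as fd stdioFdCount+i, so the
	// file appended last is fd stdioFdCount+len(ExtraFiles)-1 -- the same
	// arithmetic used for _LIBCONTAINER_INITPIPE above.
	cmd.Env = append(os.Environ(),
		fmt.Sprintf("_EXAMPLE_PIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
	fmt.Println(cmd.Env[len(cmd.Env)-1]) // _EXAMPLE_PIPE=3
}
```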
- if p.ConsoleSocket != nil { - cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket) - cmd.Env = append(cmd.Env, - fmt.Sprintf("_LIBCONTAINER_CONSOLE=%d", stdioFdCount+len(cmd.ExtraFiles)-1), - ) - } - cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe) - cmd.Env = append(cmd.Env, - fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1), - ) - // NOTE: when running a container with no PID namespace and the parent process spawning the container is - // PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason - // even with the parent still running. - if c.config.ParentDeathSignal > 0 { - cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal) - } - return cmd, nil -} - -func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe, rootDir *os.File) (*initProcess, error) { - cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard)) - nsMaps := make(map[configs.NamespaceType]string) - for _, ns := range c.config.Namespaces { - if ns.Path != "" { - nsMaps[ns.Type] = ns.Path - } - } - _, sharePidns := nsMaps[configs.NEWPID] - data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps) - if err != nil { - return nil, err - } - return &initProcess{ - cmd: cmd, - childPipe: childPipe, - parentPipe: parentPipe, - manager: c.cgroupManager, - config: c.newInitConfig(p), - container: c, - process: p, - bootstrapData: data, - sharePidns: sharePidns, - rootDir: rootDir, - }, nil -} - -func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) { - cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns)) - state, err := c.currentState() - if err != nil { - return nil, newSystemErrorWithCause(err, "getting container's current state") - } - // for setns process, we don't have to set cloneflags as the process namespaces - // will only be set via setns syscall - data, err := c.bootstrapData(0, state.NamespacePaths) - if err != nil { - return nil, err - } - return &setnsProcess{ - cmd: cmd, - cgroupPaths: c.cgroupManager.GetPaths(), - childPipe: childPipe, - parentPipe: parentPipe, - config: c.newInitConfig(p), - process: p, - bootstrapData: data, - }, nil -} - -func (c *linuxContainer) newInitConfig(process *Process) *initConfig { - cfg := &initConfig{ - Config: c.config, - Args: process.Args, - Env: process.Env, - User: process.User, - AdditionalGroups: process.AdditionalGroups, - Cwd: process.Cwd, - Capabilities: process.Capabilities, - PassedFilesCount: len(process.ExtraFiles), - ContainerId: c.ID(), - NoNewPrivileges: c.config.NoNewPrivileges, - Rootless: c.config.Rootless, - AppArmorProfile: c.config.AppArmorProfile, - ProcessLabel: c.config.ProcessLabel, - Rlimits: c.config.Rlimits, - } - if process.NoNewPrivileges != nil { - cfg.NoNewPrivileges = *process.NoNewPrivileges - } - if process.AppArmorProfile != "" { - cfg.AppArmorProfile = process.AppArmorProfile - } - if process.Label != "" { - cfg.ProcessLabel = process.Label - } - if len(process.Rlimits) > 0 { - cfg.Rlimits = process.Rlimits - } - cfg.CreateConsole = process.ConsoleSocket != nil - return cfg -} - -func (c *linuxContainer) Destroy() error { - c.m.Lock() - defer c.m.Unlock() - return c.state.destroy() -} - -func (c *linuxContainer) Pause() error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - switch status { - case Running, Created: - if err := 
c.cgroupManager.Freeze(configs.Frozen); err != nil { - return err - } - return c.state.transition(&pausedState{ - c: c, - }) - } - return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning) -} - -func (c *linuxContainer) Resume() error { - c.m.Lock() - defer c.m.Unlock() - status, err := c.currentStatus() - if err != nil { - return err - } - if status != Paused { - return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused) - } - if err := c.cgroupManager.Freeze(configs.Thawed); err != nil { - return err - } - return c.state.transition(&runningState{ - c: c, - }) -} - -func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) { - // XXX(cyphar): This requires cgroups. - if c.config.Rootless { - return nil, fmt.Errorf("cannot get OOM notifications from rootless container") - } - return notifyOnOOM(c.cgroupManager.GetPaths()) -} - -func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) { - // XXX(cyphar): This requires cgroups. - if c.config.Rootless { - return nil, fmt.Errorf("cannot get memory pressure notifications from rootless container") - } - return notifyMemoryPressure(c.cgroupManager.GetPaths(), level) -} - -var criuFeatures *criurpc.CriuFeatures - -func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error { - - var t criurpc.CriuReqType - t = criurpc.CriuReqType_FEATURE_CHECK - - if err := c.checkCriuVersion("1.8"); err != nil { - // Feature checking was introduced with CRIU 1.8. - // Ignore the feature check if an older CRIU version is used - // and just act as before. - // As all automated PR testing is done using CRIU 1.7 this - // code will not be tested by automated PR testing. - return nil - } - - // make sure the features we are looking for are really not from - // some previous check - criuFeatures = nil - - req := &criurpc.CriuReq{ - Type: &t, - // Theoretically this should not be necessary but CRIU - // segfaults if Opts is empty. 
- // Fixed in CRIU 2.12 - Opts: rpcOpts, - Features: criuFeat, - } - - err := c.criuSwrk(nil, req, criuOpts, false) - if err != nil { - logrus.Debugf("%s", err) - return fmt.Errorf("CRIU feature check failed") - } - - logrus.Debugf("Feature check says: %s", criuFeatures) - missingFeatures := false - - if *criuFeat.MemTrack && !*criuFeatures.MemTrack { - missingFeatures = true - logrus.Debugf("CRIU does not support MemTrack") - } - - if missingFeatures { - return fmt.Errorf("CRIU is missing features") - } - - return nil -} - -// checkCriuVersion checks Criu version greater than or equal to minVersion -func (c *linuxContainer) checkCriuVersion(minVersion string) error { - var x, y, z, versionReq int - - _, err := fmt.Sscanf(minVersion, "%d.%d.%d\n", &x, &y, &z) // 1.5.2 - if err != nil { - _, err = fmt.Sscanf(minVersion, "Version: %d.%d\n", &x, &y) // 1.6 - } - versionReq = x*10000 + y*100 + z - - out, err := exec.Command(c.criuPath, "-V").Output() - if err != nil { - return fmt.Errorf("Unable to execute CRIU command: %s", c.criuPath) - } - - x = 0 - y = 0 - z = 0 - if ep := strings.Index(string(out), "-"); ep >= 0 { - // criu Git version format - var version string - if sp := strings.Index(string(out), "GitID"); sp > 0 { - version = string(out)[sp:ep] - } else { - return fmt.Errorf("Unable to parse the CRIU version: %s", c.criuPath) - } - - n, err := fmt.Sscanf(string(version), "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 - if err != nil { - n, err = fmt.Sscanf(string(version), "GitID: v%d.%d", &x, &y) // 1.6 - y++ - } else { - z++ - } - if n < 2 || err != nil { - return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err) - } - } else { - // criu release version format - n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2 - if err != nil { - n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6 - } - if n < 2 || err != nil { - return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err) - } - } - - c.criuVersion = x*10000 + y*100 + z - - if c.criuVersion < versionReq { - return fmt.Errorf("CRIU version must be %s or higher", minVersion) - } - - return nil -} - -const descriptorsFilename = "descriptors.json" - -func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) { - mountDest := m.Destination - if strings.HasPrefix(mountDest, c.config.Rootfs) { - mountDest = mountDest[len(c.config.Rootfs):] - } - - extMnt := &criurpc.ExtMountMap{ - Key: proto.String(mountDest), - Val: proto.String(mountDest), - } - req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) -} - -func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error { - for _, path := range c.config.MaskPaths { - fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path)) - if err != nil { - if os.IsNotExist(err) { - continue - } - return err - } - if fi.IsDir() { - continue - } - - extMnt := &criurpc.ExtMountMap{ - Key: proto.String(path), - Val: proto.String("/dev/null"), - } - req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) - } - - return nil -} - -func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { - c.m.Lock() - defer c.m.Unlock() - - // TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has - // support for doing unprivileged dumps, but the setup of - // rootless containers might make this complicated. 
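`checkCriuVersion` above reduces version comparison to integer comparison by packing the components as `x*10000 + y*100 + z`, so CRIU "1.5.2" packs to 10502 and "2.12" to 21200. A self-contained sketch of the same scheme (`packVersion` is an illustrative name, not runc API):

```go
package main

import "fmt"

// packVersion converts a dotted version string into the x*10000+y*100+z
// form used by checkCriuVersion, so versions compare as plain ints.
// Like the original, it accepts two components and treats patch as 0.
func packVersion(v string) (int, error) {
	var x, y, z int
	if n, err := fmt.Sscanf(v, "%d.%d.%d", &x, &y, &z); n < 2 && err != nil {
		return 0, fmt.Errorf("cannot parse version %q: %v", v, err)
	}
	return x*10000 + y*100 + z, nil
}

func main() {
	need, _ := packVersion("1.5.2") // 10502
	have, _ := packVersion("2.12")  // 21200; missing patch level parses as 0
	fmt.Println(have >= need)       // true
}
```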
- if c.config.Rootless { - return fmt.Errorf("cannot checkpoint a rootless container") - } - - if err := c.checkCriuVersion("1.5.2"); err != nil { - return err - } - - if criuOpts.ImagesDirectory == "" { - return fmt.Errorf("invalid directory to save checkpoint") - } - - // Since a container can be C/R'ed multiple times, - // the checkpoint directory may already exist. - if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) { - return err - } - - if criuOpts.WorkDirectory == "" { - criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") - } - - if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) { - return err - } - - workDir, err := os.Open(criuOpts.WorkDirectory) - if err != nil { - return err - } - defer workDir.Close() - - imageDir, err := os.Open(criuOpts.ImagesDirectory) - if err != nil { - return err - } - defer imageDir.Close() - - rpcOpts := criurpc.CriuOpts{ - ImagesDirFd: proto.Int32(int32(imageDir.Fd())), - WorkDirFd: proto.Int32(int32(workDir.Fd())), - LogLevel: proto.Int32(4), - LogFile: proto.String("dump.log"), - Root: proto.String(c.config.Rootfs), - ManageCgroups: proto.Bool(true), - NotifyScripts: proto.Bool(true), - Pid: proto.Int32(int32(c.initProcess.pid())), - ShellJob: proto.Bool(criuOpts.ShellJob), - LeaveRunning: proto.Bool(criuOpts.LeaveRunning), - TcpEstablished: proto.Bool(criuOpts.TcpEstablished), - ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), - FileLocks: proto.Bool(criuOpts.FileLocks), - EmptyNs: proto.Uint32(criuOpts.EmptyNs), - } - - // append optional criu opts, e.g., page-server and port - if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 { - rpcOpts.Ps = &criurpc.CriuPageServerInfo{ - Address: proto.String(criuOpts.PageServer.Address), - Port: proto.Int32(criuOpts.PageServer.Port), - } - } - - //pre-dump may need parentImage param to complete iterative migration - if criuOpts.ParentImage != "" { - rpcOpts.ParentImg = proto.String(criuOpts.ParentImage) - rpcOpts.TrackMem = proto.Bool(true) - } - - // append optional manage cgroups mode - if criuOpts.ManageCgroupsMode != 0 { - if err := c.checkCriuVersion("1.7"); err != nil { - return err - } - mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) - rpcOpts.ManageCgroupsMode = &mode - } - - var t criurpc.CriuReqType - if criuOpts.PreDump { - feat := criurpc.CriuFeatures{ - MemTrack: proto.Bool(true), - } - - if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil { - return err - } - - t = criurpc.CriuReqType_PRE_DUMP - } else { - t = criurpc.CriuReqType_DUMP - } - req := &criurpc.CriuReq{ - Type: &t, - Opts: &rpcOpts, - } - - //no need to dump these information in pre-dump - if !criuOpts.PreDump { - for _, m := range c.config.Mounts { - switch m.Device { - case "bind": - c.addCriuDumpMount(req, m) - break - case "cgroup": - binds, err := getCgroupMounts(m) - if err != nil { - return err - } - for _, b := range binds { - c.addCriuDumpMount(req, b) - } - break - } - } - - if err := c.addMaskPaths(req); err != nil { - return err - } - - for _, node := range c.config.Devices { - m := &configs.Mount{Destination: node.Path, Source: node.Path} - c.addCriuDumpMount(req, m) - } - - // Write the FD info to a file in the image directory - fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors()) - if err != nil { - return err - } - - err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655) - if err != nil { - return err - } - } - - err = c.criuSwrk(nil, req, 
criuOpts, false) - if err != nil { - return err - } - return nil -} - -func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) { - mountDest := m.Destination - if strings.HasPrefix(mountDest, c.config.Rootfs) { - mountDest = mountDest[len(c.config.Rootfs):] - } - - extMnt := &criurpc.ExtMountMap{ - Key: proto.String(mountDest), - Val: proto.String(m.Source), - } - req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) -} - -func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) { - for _, iface := range c.config.Networks { - switch iface.Type { - case "veth": - veth := new(criurpc.CriuVethPair) - veth.IfOut = proto.String(iface.HostInterfaceName) - veth.IfIn = proto.String(iface.Name) - req.Opts.Veths = append(req.Opts.Veths, veth) - break - case "loopback": - break - } - } - for _, i := range criuOpts.VethPairs { - veth := new(criurpc.CriuVethPair) - veth.IfOut = proto.String(i.HostInterfaceName) - veth.IfIn = proto.String(i.ContainerInterfaceName) - req.Opts.Veths = append(req.Opts.Veths, veth) - } -} - -func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { - c.m.Lock() - defer c.m.Unlock() - - // TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have - // support for unprivileged restore at the moment. - if c.config.Rootless { - return fmt.Errorf("cannot restore a rootless container") - } - - if err := c.checkCriuVersion("1.5.2"); err != nil { - return err - } - if criuOpts.WorkDirectory == "" { - criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") - } - // Since a container can be C/R'ed multiple times, - // the work directory may already exist. - if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) { - return err - } - workDir, err := os.Open(criuOpts.WorkDirectory) - if err != nil { - return err - } - defer workDir.Close() - if criuOpts.ImagesDirectory == "" { - return fmt.Errorf("invalid directory to restore checkpoint") - } - imageDir, err := os.Open(criuOpts.ImagesDirectory) - if err != nil { - return err - } - defer imageDir.Close() - // CRIU has a few requirements for a root directory: - // * it must be a mount point - // * its parent must not be overmounted - // c.config.Rootfs is bind-mounted to a temporary directory - // to satisfy these requirements. 
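For context, a minimal sketch of the descriptors.json round trip: Checkpoint (above) records where each of the init process's descriptors pointed, and Restore (continuing below) turns every "pipe:" entry back into a CRIU InheritFd mapping. The file contents and fd values here are illustrative only:

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "path/filepath"
    "strings"
)

func main() {
    dir, err := ioutil.TempDir("", "criu-images") // stands in for criuOpts.ImagesDirectory
    if err != nil {
        panic(err)
    }

    // Dump side: record what each of the init process's fds pointed at.
    fds := []string{"/dev/null", "pipe:[1234]", "pipe:[1235]"}
    data, _ := json.Marshal(fds)
    if err := ioutil.WriteFile(filepath.Join(dir, "descriptors.json"), data, 0644); err != nil {
        panic(err)
    }

    // Restore side: indexes that were pipes become CRIU InheritFd entries.
    raw, err := ioutil.ReadFile(filepath.Join(dir, "descriptors.json"))
    if err != nil {
        panic(err)
    }
    var restored []string
    if err := json.Unmarshal(raw, &restored); err != nil {
        panic(err)
    }
    for i, s := range restored {
        if strings.Contains(s, "pipe:") {
            fmt.Printf("inherit fd %d as %q\n", i, s)
        }
    }
}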
- root := filepath.Join(c.root, "criu-root") - if err := os.Mkdir(root, 0755); err != nil { - return err - } - defer os.Remove(root) - root, err = filepath.EvalSymlinks(root) - if err != nil { - return err - } - err = syscall.Mount(c.config.Rootfs, root, "", syscall.MS_BIND|syscall.MS_REC, "") - if err != nil { - return err - } - defer syscall.Unmount(root, syscall.MNT_DETACH) - t := criurpc.CriuReqType_RESTORE - req := &criurpc.CriuReq{ - Type: &t, - Opts: &criurpc.CriuOpts{ - ImagesDirFd: proto.Int32(int32(imageDir.Fd())), - WorkDirFd: proto.Int32(int32(workDir.Fd())), - EvasiveDevices: proto.Bool(true), - LogLevel: proto.Int32(4), - LogFile: proto.String("restore.log"), - RstSibling: proto.Bool(true), - Root: proto.String(root), - ManageCgroups: proto.Bool(true), - NotifyScripts: proto.Bool(true), - ShellJob: proto.Bool(criuOpts.ShellJob), - ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), - TcpEstablished: proto.Bool(criuOpts.TcpEstablished), - FileLocks: proto.Bool(criuOpts.FileLocks), - EmptyNs: proto.Uint32(criuOpts.EmptyNs), - }, - } - - for _, m := range c.config.Mounts { - switch m.Device { - case "bind": - c.addCriuRestoreMount(req, m) - break - case "cgroup": - binds, err := getCgroupMounts(m) - if err != nil { - return err - } - for _, b := range binds { - c.addCriuRestoreMount(req, b) - } - break - } - } - - if len(c.config.MaskPaths) > 0 { - m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"} - c.addCriuRestoreMount(req, m) - } - - for _, node := range c.config.Devices { - m := &configs.Mount{Destination: node.Path, Source: node.Path} - c.addCriuRestoreMount(req, m) - } - - if criuOpts.EmptyNs&syscall.CLONE_NEWNET == 0 { - c.restoreNetwork(req, criuOpts) - } - - // append optional manage cgroups mode - if criuOpts.ManageCgroupsMode != 0 { - if err := c.checkCriuVersion("1.7"); err != nil { - return err - } - mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) - req.Opts.ManageCgroupsMode = &mode - } - - var ( - fds []string - fdJSON []byte - ) - if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil { - return err - } - - if err := json.Unmarshal(fdJSON, &fds); err != nil { - return err - } - for i := range fds { - if s := fds[i]; strings.Contains(s, "pipe:") { - inheritFd := new(criurpc.InheritFd) - inheritFd.Key = proto.String(s) - inheritFd.Fd = proto.Int32(int32(i)) - req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd) - } - } - return c.criuSwrk(process, req, criuOpts, true) -} - -func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error { - // XXX: Do we need to deal with this case? AFAIK criu still requires root. 
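criuApplyCgroups (continuing below) feeds CRIU one CgroupRoot entry per controller by parsing /proc/<pid>/cgroup. A hypothetical parser for that cgroup-v1 file format — an illustration only, not the vendored cgroups.ParseCgroupFile:

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// parseCgroupFile reads lines of the form "3:cpu,cpuacct:/docker/<id>"
// and maps each controller name to its cgroup path.
func parseCgroupFile(path string) (map[string]string, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    out := map[string]string{}
    s := bufio.NewScanner(f)
    for s.Scan() {
        parts := strings.SplitN(s.Text(), ":", 3)
        if len(parts) != 3 {
            continue
        }
        for _, ctrl := range strings.Split(parts[1], ",") {
            out[ctrl] = parts[2]
        }
    }
    return out, s.Err()
}

func main() {
    paths, err := parseCgroupFile("/proc/self/cgroup")
    if err != nil {
        panic(err)
    }
    for ctrl, p := range paths {
        fmt.Printf("controller %q -> %s\n", ctrl, p)
    }
}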
- if err := c.cgroupManager.Apply(pid); err != nil { - return err - } - - if err := c.cgroupManager.Set(c.config); err != nil { - return newSystemError(err) - } - - path := fmt.Sprintf("/proc/%d/cgroup", pid) - cgroupsPaths, err := cgroups.ParseCgroupFile(path) - if err != nil { - return err - } - - for c, p := range cgroupsPaths { - cgroupRoot := &criurpc.CgroupRoot{ - Ctrl: proto.String(c), - Path: proto.String(p), - } - req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot) - } - - return nil -} - -func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool) error { - fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0) - if err != nil { - return err - } - - logPath := filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile()) - criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client") - criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server") - defer criuClient.Close() - defer criuServer.Close() - - args := []string{"swrk", "3"} - logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath) - logrus.Debugf("Using CRIU with following args: %s", args) - cmd := exec.Command(c.criuPath, args...) - if process != nil { - cmd.Stdin = process.Stdin - cmd.Stdout = process.Stdout - cmd.Stderr = process.Stderr - } - cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer) - - if err := cmd.Start(); err != nil { - return err - } - criuServer.Close() - - defer func() { - criuClient.Close() - _, err := cmd.Process.Wait() - if err != nil { - return - } - }() - - if applyCgroups { - err := c.criuApplyCgroups(cmd.Process.Pid, req) - if err != nil { - return err - } - } - - var extFds []string - if process != nil { - extFds, err = getPipeFds(cmd.Process.Pid) - if err != nil { - return err - } - } - - logrus.Debugf("Using CRIU in %s mode", req.GetType().String()) - // In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts() - // should be empty. For older CRIU versions it still will be - // available but empty. 
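The transport that criuSwrk sets up above can be sketched in isolation: a SOCK_SEQPACKET socketpair (message boundaries are preserved, which matters for the protobuf framing), with the server end handed to the child as fd 3 via ExtraFiles — hence the "swrk 3" argument. The sh child below merely stands in for the criu binary; Linux only:

package main

import (
    "fmt"
    "os"
    "os/exec"
    "syscall"
)

func main() {
    fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0)
    if err != nil {
        panic(err)
    }
    client := os.NewFile(uintptr(fds[0]), "transport-client")
    server := os.NewFile(uintptr(fds[1]), "transport-server")
    defer client.Close()

    // ExtraFiles start at fd 3 in the child; the child reports what it got.
    cmd := exec.Command("sh", "-c", "readlink /proc/self/fd/3")
    cmd.ExtraFiles = []*os.File{server}
    out, err := cmd.Output()
    server.Close() // the parent no longer needs the child's end
    if err != nil {
        panic(err)
    }
    fmt.Printf("child fd 3 is %s", out)
}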
- if req.GetType() != criurpc.CriuReqType_FEATURE_CHECK { - val := reflect.ValueOf(req.GetOpts()) - v := reflect.Indirect(val) - for i := 0; i < v.NumField(); i++ { - st := v.Type() - name := st.Field(i).Name - if strings.HasPrefix(name, "XXX_") { - continue - } - value := val.MethodByName("Get" + name).Call([]reflect.Value{}) - logrus.Debugf("CRIU option %s with value %v", name, value[0]) - } - } - data, err := proto.Marshal(req) - if err != nil { - return err - } - _, err = criuClient.Write(data) - if err != nil { - return err - } - - buf := make([]byte, 10*4096) - for true { - n, err := criuClient.Read(buf) - if err != nil { - return err - } - if n == 0 { - return fmt.Errorf("unexpected EOF") - } - if n == len(buf) { - return fmt.Errorf("buffer is too small") - } - - resp := new(criurpc.CriuResp) - err = proto.Unmarshal(buf[:n], resp) - if err != nil { - return err - } - if !resp.GetSuccess() { - typeString := req.GetType().String() - return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath) - } - - t := resp.GetType() - switch { - case t == criurpc.CriuReqType_FEATURE_CHECK: - logrus.Debugf("Feature check says: %s", resp) - criuFeatures = resp.GetFeatures() - break - case t == criurpc.CriuReqType_NOTIFY: - if err := c.criuNotifications(resp, process, opts, extFds); err != nil { - return err - } - t = criurpc.CriuReqType_NOTIFY - req = &criurpc.CriuReq{ - Type: &t, - NotifySuccess: proto.Bool(true), - } - data, err = proto.Marshal(req) - if err != nil { - return err - } - _, err = criuClient.Write(data) - if err != nil { - return err - } - continue - case t == criurpc.CriuReqType_RESTORE: - case t == criurpc.CriuReqType_DUMP: - break - case t == criurpc.CriuReqType_PRE_DUMP: - // In pre-dump mode CRIU is in a loop and waits for - // the final DUMP command. - // The current runc pre-dump approach, however, is - // start criu in PRE_DUMP once for a single pre-dump - // and not the whole series of pre-dump, pre-dump, ...m, dump - // If we got the message CriuReqType_PRE_DUMP it means - // CRIU was successful and we need to forcefully stop CRIU - logrus.Debugf("PRE_DUMP finished. Send close signal to CRIU service") - criuClient.Close() - // Process status won't be success, because one end of sockets is closed - _, err := cmd.Process.Wait() - if err != nil { - logrus.Debugf("After PRE_DUMP CRIU exiting failed") - return err - } - return nil - default: - return fmt.Errorf("unable to parse the response %s", resp.String()) - } - - break - } - - // cmd.Wait() waits cmd.goroutines which are used for proxying file descriptors. - // Here we want to wait only the CRIU process. 
- st, err := cmd.Process.Wait() - if err != nil { - return err - } - if !st.Success() { - return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath) - } - return nil -} - -// block any external network activity -func lockNetwork(config *configs.Config) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - - if err := strategy.detach(config); err != nil { - return err - } - } - return nil -} - -func unlockNetwork(config *configs.Config) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - if err = strategy.attach(config); err != nil { - return err - } - } - return nil -} - -func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string) error { - notify := resp.GetNotify() - if notify == nil { - return fmt.Errorf("invalid response: %s", resp.String()) - } - switch { - case notify.GetScript() == "post-dump": - f, err := os.Create(filepath.Join(c.root, "checkpoint")) - if err != nil { - return err - } - f.Close() - case notify.GetScript() == "network-unlock": - if err := unlockNetwork(c.config); err != nil { - return err - } - case notify.GetScript() == "network-lock": - if err := lockNetwork(c.config); err != nil { - return err - } - case notify.GetScript() == "setup-namespaces": - if c.config.Hooks != nil { - s := configs.HookState{ - Version: c.config.Version, - ID: c.id, - Pid: int(notify.GetPid()), - Bundle: utils.SearchLabels(c.config.Labels, "bundle"), - } - for i, hook := range c.config.Hooks.Prestart { - if err := hook.Run(s); err != nil { - return newSystemErrorWithCausef(err, "running prestart hook %d", i) - } - } - } - case notify.GetScript() == "post-restore": - pid := notify.GetPid() - r, err := newRestoredProcess(int(pid), fds) - if err != nil { - return err - } - process.ops = r - if err := c.state.transition(&restoredState{ - imageDir: opts.ImagesDirectory, - c: c, - }); err != nil { - return err - } - // create a timestamp indicating when the restored checkpoint was started - c.created = time.Now().UTC() - if _, err := c.updateState(r); err != nil { - return err - } - if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil { - if !os.IsNotExist(err) { - logrus.Error(err) - } - } - } - return nil -} - -func (c *linuxContainer) updateState(process parentProcess) (*State, error) { - c.initProcess = process - state, err := c.currentState() - if err != nil { - return nil, err - } - err = c.saveState(state) - if err != nil { - return nil, err - } - return state, nil -} - -func (c *linuxContainer) saveState(s *State) error { - f, err := os.Create(filepath.Join(c.root, stateFilename)) - if err != nil { - return err - } - defer f.Close() - return utils.WriteJSON(f, s) -} - -func (c *linuxContainer) deleteState() error { - return os.Remove(filepath.Join(c.root, stateFilename)) -} - -func (c *linuxContainer) currentStatus() (Status, error) { - if err := c.refreshState(); err != nil { - return -1, err - } - return c.state.status(), nil -} - -// refreshState needs to be called to verify that the current state on the -// container is what is true. Because consumers of libcontainer can use it -// out of process we need to verify the container's status based on runtime -// information and not rely on our in process info. 
-func (c *linuxContainer) refreshState() error { - paused, err := c.isPaused() - if err != nil { - return err - } - if paused { - return c.state.transition(&pausedState{c: c}) - } - t, err := c.runType() - if err != nil { - return err - } - switch t { - case Created: - return c.state.transition(&createdState{c: c}) - case Running: - return c.state.transition(&runningState{c: c}) - } - return c.state.transition(&stoppedState{c: c}) -} - -// doesInitProcessExist checks if the init process is still the same process -// as the initial one, it could happen that the original process has exited -// and a new process has been created with the same pid, in this case, the -// container would already be stopped. -func (c *linuxContainer) doesInitProcessExist(initPid int) (bool, error) { - startTime, err := system.GetProcessStartTime(initPid) - if err != nil { - return false, newSystemErrorWithCausef(err, "getting init process %d start time", initPid) - } - if c.initProcessStartTime != startTime { - return false, nil - } - return true, nil -} - -func (c *linuxContainer) runType() (Status, error) { - if c.initProcess == nil { - return Stopped, nil - } - pid := c.initProcess.pid() - // return Running if the init process is alive - if err := syscall.Kill(pid, 0); err != nil { - if err == syscall.ESRCH { - // It means the process does not exist anymore, could happen when the - // process exited just when we call the function, we should not return - // error in this case. - return Stopped, nil - } - return Stopped, newSystemErrorWithCausef(err, "sending signal 0 to pid %d", pid) - } - // check if the process is still the original init process. - exist, err := c.doesInitProcessExist(pid) - if !exist || err != nil { - return Stopped, err - } - // We'll create exec fifo and blocking on it after container is created, - // and delete it after start container. - if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil { - return Created, nil - } - return Running, nil -} - -func (c *linuxContainer) isPaused() (bool, error) { - fcg := c.cgroupManager.GetPaths()["freezer"] - if fcg == "" { - // A container doesn't have a freezer cgroup - return false, nil - } - data, err := ioutil.ReadFile(filepath.Join(fcg, "freezer.state")) - if err != nil { - // If freezer cgroup is not mounted, the container would just be not paused. 
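runType's liveness probe above relies on the signal-0 idiom: kill(pid, 0) delivers no signal but still performs the existence and permission checks. A minimal sketch (alive is a hypothetical helper); note that PID reuse is exactly why the code above also compares process start times in doesInitProcessExist:

package main

import (
    "fmt"
    "syscall"
)

func alive(pid int) bool {
    err := syscall.Kill(pid, 0)
    if err == syscall.ESRCH { // no such process
        return false
    }
    // nil means running; EPERM means running but owned by another user.
    return true
}

func main() {
    fmt.Println(alive(1))      // init: true
    fmt.Println(alive(999999)) // almost certainly false
}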
- if os.IsNotExist(err) { - return false, nil - } - return false, newSystemErrorWithCause(err, "checking if container is paused") - } - return bytes.Equal(bytes.TrimSpace(data), []byte("FROZEN")), nil -} - -func (c *linuxContainer) currentState() (*State, error) { - var ( - startTime string - externalDescriptors []string - pid = -1 - ) - if c.initProcess != nil { - pid = c.initProcess.pid() - startTime, _ = c.initProcess.startTime() - externalDescriptors = c.initProcess.externalDescriptors() - } - state := &State{ - BaseState: BaseState{ - ID: c.ID(), - Config: *c.config, - InitProcessPid: pid, - InitProcessStartTime: startTime, - Created: c.created, - }, - Rootless: c.config.Rootless, - CgroupPaths: c.cgroupManager.GetPaths(), - NamespacePaths: make(map[configs.NamespaceType]string), - ExternalDescriptors: externalDescriptors, - } - if pid > 0 { - for _, ns := range c.config.Namespaces { - state.NamespacePaths[ns.Type] = ns.GetPath(pid) - } - for _, nsType := range configs.NamespaceTypes() { - if !configs.IsNamespaceSupported(nsType) { - continue - } - if _, ok := state.NamespacePaths[nsType]; !ok { - ns := configs.Namespace{Type: nsType} - state.NamespacePaths[ns.Type] = ns.GetPath(pid) - } - } - } - return state, nil -} - -// orderNamespacePaths sorts namespace paths into a list of paths that we -// can setns in order. -func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) { - paths := []string{} - order := []configs.NamespaceType{ - // The user namespace *must* be done first. - configs.NEWUSER, - configs.NEWIPC, - configs.NEWUTS, - configs.NEWNET, - configs.NEWPID, - configs.NEWNS, - } - - // Remove namespaces that we don't need to join. - var nsTypes []configs.NamespaceType - for _, ns := range order { - if c.config.Namespaces.Contains(ns) { - nsTypes = append(nsTypes, ns) - } - } - for _, nsType := range nsTypes { - if p, ok := namespaces[nsType]; ok && p != "" { - // check if the requested namespace is supported - if !configs.IsNamespaceSupported(nsType) { - return nil, newSystemError(fmt.Errorf("namespace %s is not supported", nsType)) - } - // only set to join this namespace if it exists - if _, err := os.Lstat(p); err != nil { - return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p) - } - // do not allow namespace path with comma as we use it to separate - // the namespace paths - if strings.ContainsRune(p, ',') { - return nil, newSystemError(fmt.Errorf("invalid path %s", p)) - } - paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(nsType), p)) - } - } - return paths, nil -} - -func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) { - data := bytes.NewBuffer(nil) - for _, im := range idMap { - line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size) - if _, err := data.WriteString(line); err != nil { - return nil, err - } - } - return data.Bytes(), nil -} - -// bootstrapData encodes the necessary data in netlink binary format -// as a io.Reader. -// Consumer can write the data to a bootstrap program -// such as one that uses nsenter package to bootstrap the container's -// init process correctly, i.e. with correct namespaces, uid/gid -// mapping etc. 
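encodeIDMapping above produces the text format the kernel expects in /proc/<pid>/uid_map and /proc/<pid>/gid_map. A self-contained sketch with a stand-in IDMap type in place of configs.IDMap:

package main

import (
    "bytes"
    "fmt"
)

type IDMap struct{ ContainerID, HostID, Size int }

// encode emits one "<container-id> <host-id> <size>" line per mapping.
func encode(m []IDMap) []byte {
    var buf bytes.Buffer
    for _, im := range m {
        fmt.Fprintf(&buf, "%d %d %d\n", im.ContainerID, im.HostID, im.Size)
    }
    return buf.Bytes()
}

func main() {
    // Map container root (uid 0) to unprivileged host uid 100000,
    // covering 65536 consecutive ids.
    fmt.Print(string(encode([]IDMap{{0, 100000, 65536}})))
}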
-func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) { - // create the netlink message - r := nl.NewNetlinkRequest(int(InitMsg), 0) - - // write cloneFlags - r.AddData(&Int32msg{ - Type: CloneFlagsAttr, - Value: uint32(cloneFlags), - }) - - // write custom namespace paths - if len(nsMaps) > 0 { - nsPaths, err := c.orderNamespacePaths(nsMaps) - if err != nil { - return nil, err - } - r.AddData(&Bytemsg{ - Type: NsPathsAttr, - Value: []byte(strings.Join(nsPaths, ",")), - }) - } - - // write namespace paths only when we are not joining an existing user ns - _, joinExistingUser := nsMaps[configs.NEWUSER] - if !joinExistingUser { - // write uid mappings - if len(c.config.UidMappings) > 0 { - b, err := encodeIDMapping(c.config.UidMappings) - if err != nil { - return nil, err - } - r.AddData(&Bytemsg{ - Type: UidmapAttr, - Value: b, - }) - } - - // write gid mappings - if len(c.config.GidMappings) > 0 { - b, err := encodeIDMapping(c.config.GidMappings) - if err != nil { - return nil, err - } - r.AddData(&Bytemsg{ - Type: GidmapAttr, - Value: b, - }) - // The following only applies if we are root. - if !c.config.Rootless { - // check if we have CAP_SETGID to setgroup properly - pid, err := capability.NewPid(os.Getpid()) - if err != nil { - return nil, err - } - if !pid.Get(capability.EFFECTIVE, capability.CAP_SETGID) { - r.AddData(&Boolmsg{ - Type: SetgroupAttr, - Value: true, - }) - } - } - } - } - - // write oom_score_adj - r.AddData(&Bytemsg{ - Type: OomScoreAdjAttr, - Value: []byte(fmt.Sprintf("%d", c.config.OomScoreAdj)), - }) - - // write rootless - r.AddData(&Boolmsg{ - Type: RootlessAttr, - Value: c.config.Rootless, - }) - - return bytes.NewReader(r.Serialize()), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go deleted file mode 100644 index bb84ff740..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. -type Container interface { - BaseContainer - - // Methods below here are platform specific - -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go deleted file mode 100644 index bb84ff740..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainer - -// State represents a running container's state -type State struct { - BaseState - - // Platform specific fields below here -} - -// A libcontainer container object. -// -// Each container is thread-safe within the same process. Since a container can -// be destroyed by a separate process, any function may return that the container -// was not found. 
-type Container interface { - BaseContainer - - // Methods below here are platform specific - -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go deleted file mode 100644 index 9d7d4dc89..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build linux freebsd - -package libcontainer - -// cgroup restoring strategy provided by criu -type cgMode uint32 - -const ( - CRIU_CG_MODE_SOFT cgMode = 3 + iota // restore cgroup properties if only dir created by criu - CRIU_CG_MODE_FULL // always restore all cgroups and their properties - CRIU_CG_MODE_STRICT // restore all, requiring them to not be present in the system - CRIU_CG_MODE_DEFAULT // the same as CRIU_CG_MODE_SOFT -) - -type CriuPageServerInfo struct { - Address string // IP address of CRIU page server - Port int32 // port number of CRIU page server -} - -type VethPairName struct { - ContainerInterfaceName string - HostInterfaceName string -} - -type CriuOpts struct { - ImagesDirectory string // directory for storing image files - WorkDirectory string // directory to cd and write logs/pidfiles/stats to - ParentImage string // directory for storing parent image files in pre-dump and dump - LeaveRunning bool // leave container in running state after checkpoint - TcpEstablished bool // checkpoint/restore established TCP connections - ExternalUnixConnections bool // allow external unix connections - ShellJob bool // allow dumping and restoring shell jobs - FileLocks bool // handle file locks, for safety - PreDump bool // call criu predump to perform iterative checkpoint - PageServer CriuPageServerInfo // allow dumping to a criu page server - VethPairs []VethPairName // pass the veth to criu when restoring - ManageCgroupsMode cgMode // dump or restore cgroup mode - EmptyNs uint32 // don't c/r properties for namespace from this mask -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go deleted file mode 100644 index bc9207703..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package libcontainer - -// TODO Windows: This can ultimately be entirely factored out as criu is -// a Unix concept not relevant on Windows. -type CriuOpts struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/error.go b/vendor/github.com/opencontainers/runc/libcontainer/error.go deleted file mode 100644 index 21a3789ba..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/error.go +++ /dev/null @@ -1,70 +0,0 @@ -package libcontainer - -import "io" - -// ErrorCode is the API error code type. -type ErrorCode int - -// API error codes.
-const ( - // Factory errors - IdInUse ErrorCode = iota - InvalidIdFormat - - // Container errors - ContainerNotExists - ContainerPaused - ContainerNotStopped - ContainerNotRunning - ContainerNotPaused - - // Process errors - NoProcessOps - - // Common errors - ConfigInvalid - ConsoleExists - SystemError -) - -func (c ErrorCode) String() string { - switch c { - case IdInUse: - return "Id already in use" - case InvalidIdFormat: - return "Invalid format" - case ContainerPaused: - return "Container paused" - case ConfigInvalid: - return "Invalid configuration" - case SystemError: - return "System error" - case ContainerNotExists: - return "Container does not exist" - case ContainerNotStopped: - return "Container is not stopped" - case ContainerNotRunning: - return "Container is not running" - case ConsoleExists: - return "Console exists for process" - case ContainerNotPaused: - return "Container is not paused" - case NoProcessOps: - return "No process operations" - default: - return "Unknown error" - } -} - -// Error is the API error type. -type Error interface { - error - - // Returns an error if it failed to write the detail of the Error to w. - // The detail of the Error may include the error message and a - // representation of the stack trace. - Detail(w io.Writer) error - - // Returns the error code for this error. - Code() ErrorCode -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory.go b/vendor/github.com/opencontainers/runc/libcontainer/factory.go deleted file mode 100644 index 0986cd77e..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory.go +++ /dev/null @@ -1,44 +0,0 @@ -package libcontainer - -import ( - "github.com/opencontainers/runc/libcontainer/configs" -) - -type Factory interface { - // Creates a new container with the given id and starts the initial process inside it. - // id must be a string containing only letters, digits and underscores and must contain - // between 1 and 1024 characters, inclusive. - // - // The id must not already be in use by an existing container. Containers created using - // a factory with the same path (and filesystem) must have distinct ids. - // - // Returns the new container with a running process. - // - // errors: - // IdInUse - id is already in use by a container - // InvalidIdFormat - id has incorrect format - // ConfigInvalid - config is invalid - // Systemerror - System error - // - // On error, any partially created container parts are cleaned up (the operation is atomic). - Create(id string, config *configs.Config) (Container, error) - - // Load takes an ID for an existing container and returns the container information - // from the state. This presents a read only view of the container. - // - // errors: - // Path does not exist - // System error - Load(id string) (Container, error) - - // StartInitialization is an internal API to libcontainer used during the reexec of the - // container. - // - // Errors: - // Pipe connection error - // System error - StartInitialization() error - - // Type returns info string about factory type (e.g. lxc, libcontainer...) 
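The intended call-site pattern for the Error/ErrorCode API above is to type-assert and branch on Code() rather than string-match messages. A trimmed, hypothetical sketch — these stand-in types are not the real libcontainer declarations:

package main

import "fmt"

type ErrorCode int

const (
    ContainerNotExists ErrorCode = iota
    ContainerNotRunning
)

// Error is a trimmed stand-in for the libcontainer Error interface above.
type Error interface {
    error
    Code() ErrorCode
}

type genericError struct {
    msg  string
    code ErrorCode
}

func (e *genericError) Error() string   { return e.msg }
func (e *genericError) Code() ErrorCode { return e.code }

// handle branches on the error code instead of the message text.
func handle(err error) {
    if le, ok := err.(Error); ok && le.Code() == ContainerNotExists {
        fmt.Println("nothing to do:", le)
        return
    }
    fmt.Println("unexpected:", err)
}

func main() {
    handle(&genericError{"container \"web\" does not exist", ContainerNotExists})
}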
- Type() string -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go deleted file mode 100644 index 6a0f85583..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "regexp" - "runtime/debug" - "strconv" - "syscall" - - "github.com/docker/docker/pkg/mount" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/cgroups/rootless" - "github.com/opencontainers/runc/libcontainer/cgroups/systemd" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/configs/validate" - "github.com/opencontainers/runc/libcontainer/utils" -) - -const ( - stateFilename = "state.json" - execFifoFilename = "exec.fifo" -) - -var ( - idRegex = regexp.MustCompile(`^[\w+-\.]+$`) - maxIdLen = 1024 -) - -// InitArgs returns an options func to configure a LinuxFactory with the -// provided init binary path and arguments. -func InitArgs(args ...string) func(*LinuxFactory) error { - return func(l *LinuxFactory) (err error) { - if len(args) > 0 { - // Resolve relative paths to ensure that its available - // after directory changes. - if args[0], err = filepath.Abs(args[0]); err != nil { - return newGenericError(err, ConfigInvalid) - } - } - - l.InitArgs = args - return nil - } -} - -// SystemdCgroups is an options func to configure a LinuxFactory to return -// containers that use systemd to create and manage cgroups. -func SystemdCgroups(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &systemd.Manager{ - Cgroups: config, - Paths: paths, - } - } - return nil -} - -// Cgroupfs is an options func to configure a LinuxFactory to return -// containers that use the native cgroups filesystem implementation to -// create and manage cgroups. -func Cgroupfs(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &fs.Manager{ - Cgroups: config, - Paths: paths, - } - } - return nil -} - -// RootlessCgroups is an options func to configure a LinuxFactory to -// return containers that use the "rootless" cgroup manager, which will -// fail to do any operations not possible to do with an unprivileged user. -// It should only be used in conjunction with rootless containers. -func RootlessCgroups(l *LinuxFactory) error { - l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { - return &rootless.Manager{ - Cgroups: config, - Paths: paths, - } - } - return nil -} - -// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs. 
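SystemdCgroups, Cgroupfs, RootlessCgroups and friends are functional options consumed by New (defined further below). A trimmed sketch of that pattern with a hypothetical Factory stand-in, not the real LinuxFactory:

package main

import "fmt"

type Factory struct {
    Root     string
    CriuPath string
}

// CriuPath returns an option that overrides the criu binary location,
// mirroring the shape of the real CriuPath option.
func CriuPath(path string) func(*Factory) error {
    return func(f *Factory) error {
        f.CriuPath = path
        return nil
    }
}

func New(root string, options ...func(*Factory) error) (*Factory, error) {
    f := &Factory{Root: root, CriuPath: "criu"} // defaults first
    for _, opt := range options {
        if err := opt(f); err != nil {
            return nil, err // any option can veto construction
        }
    }
    return f, nil
}

func main() {
    f, err := New("/run/runc", CriuPath("/usr/local/sbin/criu"))
    if err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", *f)
}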
-func TmpfsRoot(l *LinuxFactory) error { - mounted, err := mount.Mounted(l.Root) - if err != nil { - return err - } - if !mounted { - if err := syscall.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil { - return err - } - } - return nil -} - -// CriuPath returns an option func to configure a LinuxFactory with the -// provided criupath -func CriuPath(criupath string) func(*LinuxFactory) error { - return func(l *LinuxFactory) error { - l.CriuPath = criupath - return nil - } -} - -// New returns a linux based container factory based in the root directory and -// configures the factory with the provided option funcs. -func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { - if root != "" { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, newGenericError(err, SystemError) - } - } - l := &LinuxFactory{ - Root: root, - InitArgs: []string{"/proc/self/exe", "init"}, - Validator: validate.New(), - CriuPath: "criu", - } - Cgroupfs(l) - for _, opt := range options { - if err := opt(l); err != nil { - return nil, err - } - } - return l, nil -} - -// LinuxFactory implements the default factory interface for linux based systems. -type LinuxFactory struct { - // Root directory for the factory to store state. - Root string - - // InitArgs are arguments for calling the init responsibilities for spawning - // a container. - InitArgs []string - - // CriuPath is the path to the criu binary used for checkpoint and restore of - // containers. - CriuPath string - - // Validator provides validation to container configurations. - Validator validate.Validator - - // NewCgroupsManager returns an initialized cgroups manager for a single container. - NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager -} - -func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) { - if l.Root == "" { - return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) - } - if err := l.validateID(id); err != nil { - return nil, err - } - if err := l.Validator.Validate(config); err != nil { - return nil, newGenericError(err, ConfigInvalid) - } - uid, err := config.HostRootUID() - if err != nil { - return nil, newGenericError(err, SystemError) - } - gid, err := config.HostRootGID() - if err != nil { - return nil, newGenericError(err, SystemError) - } - containerRoot := filepath.Join(l.Root, id) - if _, err := os.Stat(containerRoot); err == nil { - return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse) - } else if !os.IsNotExist(err) { - return nil, newGenericError(err, SystemError) - } - if err := os.MkdirAll(containerRoot, 0711); err != nil { - return nil, newGenericError(err, SystemError) - } - if err := os.Chown(containerRoot, uid, gid); err != nil { - return nil, newGenericError(err, SystemError) - } - if config.Rootless { - RootlessCgroups(l) - } - c := &linuxContainer{ - id: id, - root: containerRoot, - config: config, - initArgs: l.InitArgs, - criuPath: l.CriuPath, - cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), - } - c.state = &stoppedState{c: c} - return c, nil -} - -func (l *LinuxFactory) Load(id string) (Container, error) { - if l.Root == "" { - return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) - } - containerRoot := filepath.Join(l.Root, id) - state, err := l.loadState(containerRoot, id) - if err != nil { - return nil, err - } - r := &nonChildProcess{ - processPid: state.InitProcessPid, - processStartTime: state.InitProcessStartTime, - fds: 
state.ExternalDescriptors, - } - // We have to use the RootlessManager. - if state.Rootless { - RootlessCgroups(l) - } - c := &linuxContainer{ - initProcess: r, - initProcessStartTime: state.InitProcessStartTime, - id: id, - config: &state.Config, - initArgs: l.InitArgs, - criuPath: l.CriuPath, - cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), - root: containerRoot, - created: state.Created, - } - c.state = &loadedState{c: c} - if err := c.refreshState(); err != nil { - return nil, err - } - return c, nil -} - -func (l *LinuxFactory) Type() string { - return "libcontainer" -} - -// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state -// This is a low level implementation detail of the reexec and should not be consumed externally -func (l *LinuxFactory) StartInitialization() (err error) { - var ( - pipefd, rootfd int - consoleSocket *os.File - envInitPipe = os.Getenv("_LIBCONTAINER_INITPIPE") - envStateDir = os.Getenv("_LIBCONTAINER_STATEDIR") - envConsole = os.Getenv("_LIBCONTAINER_CONSOLE") - ) - - // Get the INITPIPE. - pipefd, err = strconv.Atoi(envInitPipe) - if err != nil { - return fmt.Errorf("unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s", envInitPipe, err) - } - - var ( - pipe = os.NewFile(uintptr(pipefd), "pipe") - it = initType(os.Getenv("_LIBCONTAINER_INITTYPE")) - ) - defer pipe.Close() - - // Only init processes have STATEDIR. - rootfd = -1 - if it == initStandard { - if rootfd, err = strconv.Atoi(envStateDir); err != nil { - return fmt.Errorf("unable to convert _LIBCONTAINER_STATEDIR=%s to int: %s", envStateDir, err) - } - } - - if envConsole != "" { - console, err := strconv.Atoi(envConsole) - if err != nil { - return fmt.Errorf("unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s", envConsole, err) - } - consoleSocket = os.NewFile(uintptr(console), "console-socket") - defer consoleSocket.Close() - } - - // clear the current process's environment to clean any libcontainer - // specific env vars. - os.Clearenv() - - defer func() { - // We have an error during the initialization of the container's init, - // send it back to the parent process in the form of an initError. - if werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil { - fmt.Fprintln(os.Stderr, err) - return - } - if werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil { - fmt.Fprintln(os.Stderr, err) - return - } - }() - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack())) - } - }() - - i, err := newContainerInit(it, pipe, consoleSocket, rootfd) - if err != nil { - return err - } - - // If Init succeeds, syscall.Exec will not return, hence none of the defers will be called. 
- return i.Init() -} - -func (l *LinuxFactory) loadState(root, id string) (*State, error) { - f, err := os.Open(filepath.Join(root, stateFilename)) - if err != nil { - if os.IsNotExist(err) { - return nil, newGenericError(fmt.Errorf("container %q does not exist", id), ContainerNotExists) - } - return nil, newGenericError(err, SystemError) - } - defer f.Close() - var state *State - if err := json.NewDecoder(f).Decode(&state); err != nil { - return nil, newGenericError(err, SystemError) - } - return state, nil -} - -func (l *LinuxFactory) validateID(id string) error { - if !idRegex.MatchString(id) { - return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat) - } - if len(id) > maxIdLen { - return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat) - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go deleted file mode 100644 index 6e7de2fe7..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go +++ /dev/null @@ -1,92 +0,0 @@ -package libcontainer - -import ( - "fmt" - "io" - "text/template" - "time" - - "github.com/opencontainers/runc/libcontainer/stacktrace" -) - -var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}} -Code: {{.ECode}} -{{if .Message }} -Message: {{.Message}} -{{end}} -Frames:{{range $i, $frame := .Stack.Frames}} ---- -{{$i}}: {{$frame.Function}} -Package: {{$frame.Package}} -File: {{$frame.File}}@{{$frame.Line}}{{end}} -`)) - -func newGenericError(err error, c ErrorCode) Error { - if le, ok := err.(Error); ok { - return le - } - gerr := &genericError{ - Timestamp: time.Now(), - Err: err, - ECode: c, - Stack: stacktrace.Capture(1), - } - if err != nil { - gerr.Message = err.Error() - } - return gerr -} - -func newSystemError(err error) Error { - return createSystemError(err, "") -} - -func newSystemErrorWithCausef(err error, cause string, v ...interface{}) Error { - return createSystemError(err, fmt.Sprintf(cause, v...)) -} - -func newSystemErrorWithCause(err error, cause string) Error { - return createSystemError(err, cause) -} - -// createSystemError creates the specified error with the correct number of -// stack frames skipped. This is only to be called by the other functions for -// formatting the error. 
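validateID above enforces a simple shape on container ids; here are the same checks in isolation, with the regex and the 1024-character maxIdLen taken from the code above:

package main

import (
    "fmt"
    "regexp"
)

// Same constraints as validateID: word characters plus '+', '-', '.'.
var idRegex = regexp.MustCompile(`^[\w+-\.]+$`)

func validID(id string) bool {
    return idRegex.MatchString(id) && len(id) <= 1024
}

func main() {
    fmt.Println(validID("redis_1")) // true
    fmt.Println(validID("bad id!")) // false: space and '!' are rejected
}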
-func createSystemError(err error, cause string) Error { - gerr := &genericError{ - Timestamp: time.Now(), - Err: err, - ECode: SystemError, - Cause: cause, - Stack: stacktrace.Capture(2), - } - if err != nil { - gerr.Message = err.Error() - } - return gerr -} - -type genericError struct { - Timestamp time.Time - ECode ErrorCode - Err error `json:"-"` - Cause string - Message string - Stack stacktrace.Stacktrace -} - -func (e *genericError) Error() string { - if e.Cause == "" { - return e.Message - } - frame := e.Stack.Frames[0] - return fmt.Sprintf("%s:%d: %s caused %q", frame.File, frame.Line, e.Cause, e.Message) -} - -func (e *genericError) Code() ErrorCode { - return e.ECode -} - -func (e *genericError) Detail(w io.Writer) error { - return errorTemplate.Execute(w, e) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go deleted file mode 100644 index 99cc02cbd..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ /dev/null @@ -1,500 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - "strings" - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/vishvananda/netlink" -) - -type initType string - -const ( - initSetns initType = "setns" - initStandard initType = "standard" -) - -type pid struct { - Pid int `json:"pid"` -} - -// network is an internal struct used to setup container networks. -type network struct { - configs.Network - - // TempVethPeerName is a unique temporary veth peer name that was placed into - // the container's namespace. 
- TempVethPeerName string `json:"temp_veth_peer_name"` -} - -// initConfig is used for transferring parameters from Exec() to Init() -type initConfig struct { - Args []string `json:"args"` - Env []string `json:"env"` - Cwd string `json:"cwd"` - Capabilities *configs.Capabilities `json:"capabilities"` - ProcessLabel string `json:"process_label"` - AppArmorProfile string `json:"apparmor_profile"` - NoNewPrivileges bool `json:"no_new_privileges"` - User string `json:"user"` - AdditionalGroups []string `json:"additional_groups"` - Config *configs.Config `json:"config"` - Networks []*network `json:"network"` - PassedFilesCount int `json:"passed_files_count"` - ContainerId string `json:"containerid"` - Rlimits []configs.Rlimit `json:"rlimits"` - CreateConsole bool `json:"create_console"` - Rootless bool `json:"rootless"` -} - -type initer interface { - Init() error -} - -func newContainerInit(t initType, pipe *os.File, consoleSocket *os.File, stateDirFD int) (initer, error) { - var config *initConfig - if err := json.NewDecoder(pipe).Decode(&config); err != nil { - return nil, err - } - if err := populateProcessEnvironment(config.Env); err != nil { - return nil, err - } - switch t { - case initSetns: - return &linuxSetnsInit{ - pipe: pipe, - consoleSocket: consoleSocket, - config: config, - }, nil - case initStandard: - return &linuxStandardInit{ - pipe: pipe, - consoleSocket: consoleSocket, - parentPid: syscall.Getppid(), - config: config, - stateDirFD: stateDirFD, - }, nil - } - return nil, fmt.Errorf("unknown init type %q", t) -} - -// populateProcessEnvironment loads the provided environment variables into the -// current processes's environment. -func populateProcessEnvironment(env []string) error { - for _, pair := range env { - p := strings.SplitN(pair, "=", 2) - if len(p) < 2 { - return fmt.Errorf("invalid environment '%v'", pair) - } - if err := os.Setenv(p[0], p[1]); err != nil { - return err - } - } - return nil -} - -// finalizeNamespace drops the caps, sets the correct user -// and working dir, and closes any leaked file descriptors -// before executing the command inside the namespace -func finalizeNamespace(config *initConfig) error { - // Ensure that all unwanted fds we may have accidentally - // inherited are marked close-on-exec so they stay out of the - // container - if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil { - return err - } - - capabilities := &configs.Capabilities{} - if config.Capabilities != nil { - capabilities = config.Capabilities - } else if config.Config.Capabilities != nil { - capabilities = config.Config.Capabilities - } - w, err := newContainerCapList(capabilities) - if err != nil { - return err - } - // drop capabilities in bounding set before changing user - if err := w.ApplyBoundingSet(); err != nil { - return err - } - // preserve existing capabilities while we change users - if err := system.SetKeepCaps(); err != nil { - return err - } - if err := setupUser(config); err != nil { - return err - } - if err := system.ClearKeepCaps(); err != nil { - return err - } - if err := w.ApplyCaps(); err != nil { - return err - } - if config.Cwd != "" { - if err := syscall.Chdir(config.Cwd); err != nil { - return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err) - } - } - return nil -} - -// setupConsole sets up the console from inside the container, and sends the -// master pty fd to the config.Pipe (using cmsg). 
This is done to ensure that -// consoles are scoped to a container properly (see runc#814 and the many -// issues related to that). This has to be run *after* we've pivoted to the new -// rootfs (and the users' configuration is entirely set up). -func setupConsole(socket *os.File, config *initConfig, mount bool) error { - defer socket.Close() - // At this point, /dev/ptmx points to something that we would expect. We - // used to change the owner of the slave path, but since the /dev/pts mount - // can have gid=X set (at the users' option). So touching the owner of the - // slave PTY is not necessary, as the kernel will handle that for us. Note - // however, that setupUser (specifically fixStdioPermissions) *will* change - // the UID owner of the console to be the user the process will run as (so - // they can actually control their console). - console, err := newConsole() - if err != nil { - return err - } - // After we return from here, we don't need the console anymore. - defer console.Close() - - linuxConsole, ok := console.(*linuxConsole) - if !ok { - return fmt.Errorf("failed to cast console to *linuxConsole") - } - // Mount the console inside our rootfs. - if mount { - if err := linuxConsole.mount(); err != nil { - return err - } - } - // While we can access console.master, using the API is a good idea. - if err := utils.SendFd(socket, linuxConsole.File()); err != nil { - return err - } - // Now, dup over all the things. - return linuxConsole.dupStdio() -} - -// syncParentReady sends to the given pipe a JSON payload which indicates that -// the init is ready to Exec the child process. It then waits for the parent to -// indicate that it is cleared to Exec. -func syncParentReady(pipe io.ReadWriter) error { - // Tell parent. - if err := writeSync(pipe, procReady); err != nil { - return err - } - - // Wait for parent to give the all-clear. - if err := readSync(pipe, procRun); err != nil { - return err - } - - return nil -} - -// syncParentHooks sends to the given pipe a JSON payload which indicates that -// the parent should execute pre-start hooks. It then waits for the parent to -// indicate that it is cleared to resume. -func syncParentHooks(pipe io.ReadWriter) error { - // Tell parent. - if err := writeSync(pipe, procHooks); err != nil { - return err - } - - // Wait for parent to give the all-clear. - if err := readSync(pipe, procResume); err != nil { - return err - } - - return nil -} - -// setupUser changes the groups, gid, and uid for the user inside the container -func setupUser(config *initConfig) error { - // Set up defaults. - defaultExecUser := user.ExecUser{ - Uid: 0, - Gid: 0, - Home: "/", - } - - passwdPath, err := user.GetPasswdPath() - if err != nil { - return err - } - - groupPath, err := user.GetGroupPath() - if err != nil { - return err - } - - execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath) - if err != nil { - return err - } - - var addGroups []int - if len(config.AdditionalGroups) > 0 { - addGroups, err = user.GetAdditionalGroupsPath(config.AdditionalGroups, groupPath) - if err != nil { - return err - } - } - - if config.Rootless { - if execUser.Uid != 0 { - return fmt.Errorf("cannot run as a non-root user in a rootless container") - } - - if execUser.Gid != 0 { - return fmt.Errorf("cannot run as a non-root group in a rootless container") - } - - // We cannot set any additional groups in a rootless container and thus we - // bail if the user asked us to do so. 
TODO: We currently can't do this - // earlier, but if libcontainer.Process.User was typesafe this might work. - if len(addGroups) > 0 { - return fmt.Errorf("cannot set any additional groups in a rootless container") - } - } - - // before we change to the container's user make sure that the processes STDIO - // is correctly owned by the user that we are switching to. - if err := fixStdioPermissions(config, execUser); err != nil { - return err - } - - // This isn't allowed in an unprivileged user namespace since Linux 3.19. - // There's nothing we can do about /etc/group entries, so we silently - // ignore setting groups here (since the user didn't explicitly ask us to - // set the group). - if !config.Rootless { - suppGroups := append(execUser.Sgids, addGroups...) - if err := syscall.Setgroups(suppGroups); err != nil { - return err - } - } - - if err := system.Setgid(execUser.Gid); err != nil { - return err - } - - if err := system.Setuid(execUser.Uid); err != nil { - return err - } - - // if we didn't get HOME already, set it based on the user's HOME - if envHome := os.Getenv("HOME"); envHome == "" { - if err := os.Setenv("HOME", execUser.Home); err != nil { - return err - } - } - return nil -} - -// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user. -// The ownership needs to match because it is created outside of the container and needs to be -// localized. -func fixStdioPermissions(config *initConfig, u *user.ExecUser) error { - var null syscall.Stat_t - if err := syscall.Stat("/dev/null", &null); err != nil { - return err - } - for _, fd := range []uintptr{ - os.Stdin.Fd(), - os.Stderr.Fd(), - os.Stdout.Fd(), - } { - var s syscall.Stat_t - if err := syscall.Fstat(int(fd), &s); err != nil { - return err - } - - // Skip chown of /dev/null if it was used as one of the STDIO fds. - if s.Rdev == null.Rdev { - continue - } - - // Skip chown if s.Gid is actually an unmapped gid in the host. While - // this is a bit dodgy if it just so happens that the console _is_ - // owned by overflow_gid, there's no way for us to disambiguate this as - // a userspace program. - if _, err := config.Config.HostGID(int(s.Gid)); err != nil { - continue - } - - // We only change the uid owner (as it is possible for the mount to - // prefer a different gid, and there's no reason for us to change it). - // The reason why we don't just leave the default uid=X mount setup is - // that users expect to be able to actually use their console. Without - // this code, you couldn't effectively run as a non-root user inside a - // container and also have a console set up. - if err := syscall.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil { - return err - } - } - return nil -} - -// setupNetwork sets up and initializes any network interface inside the container. 
-func setupNetwork(config *initConfig) error { - for _, config := range config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - if err := strategy.initialize(config); err != nil { - return err - } - } - return nil -} - -func setupRoute(config *configs.Config) error { - for _, config := range config.Routes { - _, dst, err := net.ParseCIDR(config.Destination) - if err != nil { - return err - } - src := net.ParseIP(config.Source) - if src == nil { - return fmt.Errorf("Invalid source for route: %s", config.Source) - } - gw := net.ParseIP(config.Gateway) - if gw == nil { - return fmt.Errorf("Invalid gateway for route: %s", config.Gateway) - } - l, err := netlink.LinkByName(config.InterfaceName) - if err != nil { - return err - } - route := &netlink.Route{ - Scope: netlink.SCOPE_UNIVERSE, - Dst: dst, - Src: src, - Gw: gw, - LinkIndex: l.Attrs().Index, - } - if err := netlink.RouteAdd(route); err != nil { - return err - } - } - return nil -} - -func setupRlimits(limits []configs.Rlimit, pid int) error { - for _, rlimit := range limits { - if err := system.Prlimit(pid, rlimit.Type, syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}); err != nil { - return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err) - } - } - return nil -} - -const _P_PID = 1 - -type siginfo struct { - si_signo int32 - si_errno int32 - si_code int32 - // below here is a union; si_pid is the only field we use - si_pid int32 - // Pad to 128 bytes as detailed in blockUntilWaitable - pad [96]byte -} - -// isWaitable returns true if the process has exited false otherwise. -// Its based off blockUntilWaitable in src/os/wait_waitid.go -func isWaitable(pid int) (bool, error) { - si := &siginfo{} - _, _, e := syscall.Syscall6(syscall.SYS_WAITID, _P_PID, uintptr(pid), uintptr(unsafe.Pointer(si)), syscall.WEXITED|syscall.WNOWAIT|syscall.WNOHANG, 0, 0) - if e != 0 { - return false, os.NewSyscallError("waitid", e) - } - - return si.si_pid != 0, nil -} - -// isNoChildren returns true if err represents a syscall.ECHILD false otherwise -func isNoChildren(err error) bool { - switch err := err.(type) { - case syscall.Errno: - if err == syscall.ECHILD { - return true - } - case *os.SyscallError: - if err.Err == syscall.ECHILD { - return true - } - } - return false -} - -// signalAllProcesses freezes then iterates over all the processes inside the -// manager's cgroups sending the signal s to them. -// If s is SIGKILL then it will wait for each process to exit. -// For all other signals it will check if the process is ready to report its -// exit status and only if it is will a wait be performed. 
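setupRoute above validates each route before handing it to netlink: the destination must parse as CIDR, and the source and gateway as plain IPs. The same parsing steps in isolation, without the netlink call:

package main

import (
    "fmt"
    "net"
)

func main() {
    _, dst, err := net.ParseCIDR("10.0.0.0/24")
    if err != nil {
        panic(err)
    }
    src := net.ParseIP("10.0.0.2")
    gw := net.ParseIP("10.0.0.1")
    if src == nil || gw == nil {
        panic("invalid source or gateway")
    }
    fmt.Println(dst, src, gw) // values ready for a netlink.Route
}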
-func signalAllProcesses(m cgroups.Manager, s os.Signal) error { - var procs []*os.Process - if err := m.Freeze(configs.Frozen); err != nil { - logrus.Warn(err) - } - pids, err := m.GetAllPids() - if err != nil { - m.Freeze(configs.Thawed) - return err - } - for _, pid := range pids { - p, err := os.FindProcess(pid) - if err != nil { - logrus.Warn(err) - continue - } - procs = append(procs, p) - if err := p.Signal(s); err != nil { - logrus.Warn(err) - } - } - if err := m.Freeze(configs.Thawed); err != nil { - logrus.Warn(err) - } - - for _, p := range procs { - if s != syscall.SIGKILL { - if ok, err := isWaitable(p.Pid); err != nil { - if !isNoChildren(err) { - logrus.Warn("signalAllProcesses: ", p.Pid, err) - } - continue - } else if !ok { - // Not ready to report so don't wait - continue - } - } - - if _, err := p.Wait(); err != nil { - if !isNoChildren(err) { - logrus.Warn("wait: ", err) - } - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go deleted file mode 100644 index bc725a227..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "syscall" - - "github.com/vishvananda/netlink/nl" -) - -// list of known message types we want to send to bootstrap program -// The number is randomly chosen to not conflict with known netlink types -const ( - InitMsg uint16 = 62000 - CloneFlagsAttr uint16 = 27281 - NsPathsAttr uint16 = 27282 - UidmapAttr uint16 = 27283 - GidmapAttr uint16 = 27284 - SetgroupAttr uint16 = 27285 - OomScoreAdjAttr uint16 = 27286 - RootlessAttr uint16 = 27287 - - // When syscall.NLA_HDRLEN is in gccgo, take this out. - syscall_NLA_HDRLEN = (syscall.SizeofNlAttr + syscall.NLA_ALIGNTO - 1) & ^(syscall.NLA_ALIGNTO - 1) -) - -type Int32msg struct { - Type uint16 - Value uint32 -} - -// Serialize serializes the message. 
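// The nlattr wire format assumed by the Serialize methods below, in
// isolation: a 2-byte native-endian length, a 2-byte type, then the payload,
// padded so the total length is a multiple of NLA_ALIGNTO (4 bytes). Names
// here are local to this sketch.
package sketch

const nlaAlignTo = 4

// nlaAlign rounds n up to the next multiple of 4, the same
// (x + NLA_ALIGNTO - 1) &^ (NLA_ALIGNTO - 1) arithmetic used below.
func nlaAlign(n int) int {
	return (n + nlaAlignTo - 1) &^ (nlaAlignTo - 1)
}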
-// Int32msg has the following representation -// | nlattr len | nlattr type | -// | uint32 value | -func (msg *Int32msg) Serialize() []byte { - buf := make([]byte, msg.Len()) - native := nl.NativeEndian() - native.PutUint16(buf[0:2], uint16(msg.Len())) - native.PutUint16(buf[2:4], msg.Type) - native.PutUint32(buf[4:8], msg.Value) - return buf -} - -func (msg *Int32msg) Len() int { - return syscall_NLA_HDRLEN + 4 -} - -// Bytemsg has the following representation -// | nlattr len | nlattr type | -// | value | pad | -type Bytemsg struct { - Type uint16 - Value []byte -} - -func (msg *Bytemsg) Serialize() []byte { - l := msg.Len() - buf := make([]byte, (l+syscall.NLA_ALIGNTO-1) & ^(syscall.NLA_ALIGNTO-1)) - native := nl.NativeEndian() - native.PutUint16(buf[0:2], uint16(l)) - native.PutUint16(buf[2:4], msg.Type) - copy(buf[4:], msg.Value) - return buf -} - -func (msg *Bytemsg) Len() int { - return syscall_NLA_HDRLEN + len(msg.Value) + 1 // null-terminated -} - -type Boolmsg struct { - Type uint16 - Value bool -} - -func (msg *Boolmsg) Serialize() []byte { - buf := make([]byte, msg.Len()) - native := nl.NativeEndian() - native.PutUint16(buf[0:2], uint16(msg.Len())) - native.PutUint16(buf[2:4], msg.Type) - if msg.Value { - buf[4] = 1 - } else { - buf[4] = 0 - } - return buf -} - -func (msg *Boolmsg) Len() int { - return syscall_NLA_HDRLEN + 1 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go deleted file mode 100644 index 5075bee4d..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go +++ /dev/null @@ -1,259 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io/ioutil" - "net" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/vishvananda/netlink" -) - -var strategies = map[string]networkStrategy{ - "veth": &veth{}, - "loopback": &loopback{}, -} - -// networkStrategy represents a specific network configuration for -// a container's networking stack -type networkStrategy interface { - create(*network, int) error - initialize(*network) error - detach(*configs.Network) error - attach(*configs.Network) error -} - -// getStrategy returns the specific network strategy for the -// provided type. -func getStrategy(tpe string) (networkStrategy, error) { - s, exists := strategies[tpe] - if !exists { - return nil, fmt.Errorf("unknown strategy type %q", tpe) - } - return s, nil -} - -// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. -func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) { - out := &NetworkInterface{Name: interfaceName} - // This can happen if the network runtime information is missing - possible if the - // container was created by an old version of libcontainer. - if interfaceName == "" { - return out, nil - } - type netStatsPair struct { - // Where to write the output. - Out *uint64 - // The network stats file to read. - File string - } - // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
- netStats := []netStatsPair{ - {Out: &out.RxBytes, File: "tx_bytes"}, - {Out: &out.RxPackets, File: "tx_packets"}, - {Out: &out.RxErrors, File: "tx_errors"}, - {Out: &out.RxDropped, File: "tx_dropped"}, - - {Out: &out.TxBytes, File: "rx_bytes"}, - {Out: &out.TxPackets, File: "rx_packets"}, - {Out: &out.TxErrors, File: "rx_errors"}, - {Out: &out.TxDropped, File: "rx_dropped"}, - } - for _, netStat := range netStats { - data, err := readSysfsNetworkStats(interfaceName, netStat.File) - if err != nil { - return nil, err - } - *(netStat.Out) = data - } - return out, nil -} - -// Reads the specified statistics available under /sys/class/net//statistics -func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { - data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// loopback is a network strategy that provides a basic loopback device -type loopback struct { -} - -func (l *loopback) create(n *network, nspid int) error { - return nil -} - -func (l *loopback) initialize(config *network) error { - return netlink.LinkSetUp(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "lo"}}) -} - -func (l *loopback) attach(n *configs.Network) (err error) { - return nil -} - -func (l *loopback) detach(n *configs.Network) (err error) { - return nil -} - -// veth is a network strategy that uses a bridge and creates -// a veth pair, one that is attached to the bridge on the host and the other -// is placed inside the container's namespace -type veth struct { -} - -func (v *veth) detach(n *configs.Network) (err error) { - return netlink.LinkSetMaster(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: n.HostInterfaceName}}, nil) -} - -// attach a container network interface to an external network -func (v *veth) attach(n *configs.Network) (err error) { - brl, err := netlink.LinkByName(n.Bridge) - if err != nil { - return err - } - br, ok := brl.(*netlink.Bridge) - if !ok { - return fmt.Errorf("Wrong device type %T", brl) - } - host, err := netlink.LinkByName(n.HostInterfaceName) - if err != nil { - return err - } - - if err := netlink.LinkSetMaster(host, br); err != nil { - return err - } - if err := netlink.LinkSetMTU(host, n.Mtu); err != nil { - return err - } - if n.HairpinMode { - if err := netlink.LinkSetHairpin(host, true); err != nil { - return err - } - } - if err := netlink.LinkSetUp(host); err != nil { - return err - } - - return nil -} - -func (v *veth) create(n *network, nspid int) (err error) { - tmpName, err := v.generateTempPeerName() - if err != nil { - return err - } - n.TempVethPeerName = tmpName - if n.Bridge == "" { - return fmt.Errorf("bridge is not specified") - } - veth := &netlink.Veth{ - LinkAttrs: netlink.LinkAttrs{ - Name: n.HostInterfaceName, - TxQLen: n.TxQueueLen, - }, - PeerName: n.TempVethPeerName, - } - if err := netlink.LinkAdd(veth); err != nil { - return err - } - defer func() { - if err != nil { - netlink.LinkDel(veth) - } - }() - if err := v.attach(&n.Network); err != nil { - return err - } - child, err := netlink.LinkByName(n.TempVethPeerName) - if err != nil { - return err - } - return netlink.LinkSetNsPid(child, nspid) -} - -func (v *veth) generateTempPeerName() (string, error) { - return utils.GenerateRandomName("veth", 7) -} - -func (v *veth) initialize(config *network) error { - peer := config.TempVethPeerName - if peer == "" { - return fmt.Errorf("peer is not specified") - } - child, err := 
netlink.LinkByName(peer) - if err != nil { - return err - } - if err := netlink.LinkSetDown(child); err != nil { - return err - } - if err := netlink.LinkSetName(child, config.Name); err != nil { - return err - } - // get the interface again after we changed the name as the index also changes. - if child, err = netlink.LinkByName(config.Name); err != nil { - return err - } - if config.MacAddress != "" { - mac, err := net.ParseMAC(config.MacAddress) - if err != nil { - return err - } - if err := netlink.LinkSetHardwareAddr(child, mac); err != nil { - return err - } - } - ip, err := netlink.ParseAddr(config.Address) - if err != nil { - return err - } - if err := netlink.AddrAdd(child, ip); err != nil { - return err - } - if config.IPv6Address != "" { - ip6, err := netlink.ParseAddr(config.IPv6Address) - if err != nil { - return err - } - if err := netlink.AddrAdd(child, ip6); err != nil { - return err - } - } - if err := netlink.LinkSetMTU(child, config.Mtu); err != nil { - return err - } - if err := netlink.LinkSetUp(child); err != nil { - return err - } - if config.Gateway != "" { - gw := net.ParseIP(config.Gateway) - if err := netlink.RouteAdd(&netlink.Route{ - Scope: netlink.SCOPE_UNIVERSE, - LinkIndex: child.Attrs().Index, - Gw: gw, - }); err != nil { - return err - } - } - if config.IPv6Gateway != "" { - gw := net.ParseIP(config.IPv6Gateway) - if err := netlink.RouteAdd(&netlink.Route{ - Scope: netlink.SCOPE_UNIVERSE, - LinkIndex: child.Attrs().Index, - Gw: gw, - }); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go deleted file mode 100644 index 839a50c55..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" -) - -const oomCgroupName = "memory" - -type PressureLevel uint - -const ( - LowPressure PressureLevel = iota - MediumPressure - CriticalPressure -) - -func registerMemoryEvent(cgDir string, evName string, arg string) (<-chan struct{}, error) { - evFile, err := os.Open(filepath.Join(cgDir, evName)) - if err != nil { - return nil, err - } - fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) - if syserr != 0 { - evFile.Close() - return nil, syserr - } - - eventfd := os.NewFile(fd, "eventfd") - - eventControlPath := filepath.Join(cgDir, "cgroup.event_control") - data := fmt.Sprintf("%d %d %s", eventfd.Fd(), evFile.Fd(), arg) - if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil { - eventfd.Close() - evFile.Close() - return nil, err - } - ch := make(chan struct{}) - go func() { - defer func() { - close(ch) - eventfd.Close() - evFile.Close() - }() - buf := make([]byte, 8) - for { - if _, err := eventfd.Read(buf); err != nil { - return - } - // When a cgroup is destroyed, an event is sent to eventfd. - // So if the control path is gone, return instead of notifying. - if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { - return - } - ch <- struct{}{} - } - }() - return ch, nil -} - -// notifyOnOOM returns channel on which you can expect event about OOM, -// if process died without OOM this channel will be closed. 
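// A consumption sketch for the channel documented above; the notify
// parameter stands in for notifyOnOOM, and the paths map (cgroup subsystem
// name to mountpoint) is hypothetical. One receive corresponds to one OOM
// event, and the channel closing means the cgroup went away.
package sketch

import "log"

func watchOOM(notify func(map[string]string) (<-chan struct{}, error), paths map[string]string) error {
	ch, err := notify(paths)
	if err != nil {
		return err
	}
	go func() {
		for range ch {
			log.Println("container hit an OOM condition")
		}
	}()
	return nil
}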
-func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) { - dir := paths[oomCgroupName] - if dir == "" { - return nil, fmt.Errorf("path %q missing", oomCgroupName) - } - - return registerMemoryEvent(dir, "memory.oom_control", "") -} - -func notifyMemoryPressure(paths map[string]string, level PressureLevel) (<-chan struct{}, error) { - dir := paths[oomCgroupName] - if dir == "" { - return nil, fmt.Errorf("path %q missing", oomCgroupName) - } - - if level > CriticalPressure { - return nil, fmt.Errorf("invalid pressure level %d", level) - } - - levelStr := []string{"low", "medium", "critical"}[level] - return registerMemoryEvent(dir, "memory.pressure_level", levelStr) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process.go b/vendor/github.com/opencontainers/runc/libcontainer/process.go deleted file mode 100644 index f1ad08149..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/process.go +++ /dev/null @@ -1,106 +0,0 @@ -package libcontainer - -import ( - "fmt" - "io" - "math" - "os" - - "github.com/opencontainers/runc/libcontainer/configs" -) - -type processOperations interface { - wait() (*os.ProcessState, error) - signal(sig os.Signal) error - pid() int -} - -// Process specifies the configuration and IO for a process inside -// a container. -type Process struct { - // The command to be run followed by any arguments. - Args []string - - // Env specifies the environment variables for the process. - Env []string - - // User will set the uid and gid of the executing process running inside the container - // local to the container's user and group configuration. - User string - - // AdditionalGroups specifies the gids that should be added to supplementary groups - // in addition to those that the user belongs to. - AdditionalGroups []string - - // Cwd will change the processes current working directory inside the container's rootfs. - Cwd string - - // Stdin is a pointer to a reader which provides the standard input stream. - Stdin io.Reader - - // Stdout is a pointer to a writer which receives the standard output stream. - Stdout io.Writer - - // Stderr is a pointer to a writer which receives the standard error stream. - Stderr io.Writer - - // ExtraFiles specifies additional open files to be inherited by the container - ExtraFiles []*os.File - - // Capabilities specify the capabilities to keep when executing the process inside the container - // All capabilities not specified will be dropped from the processes capability mask - Capabilities *configs.Capabilities - - // AppArmorProfile specifies the profile to apply to the process and is - // changed at the time the process is execed - AppArmorProfile string - - // Label specifies the label to apply to the process. It is commonly used by selinux - Label string - - // NoNewPrivileges controls whether processes can gain additional privileges. - NoNewPrivileges *bool - - // Rlimits specifies the resource limits, such as max open files, to set in the container - // If Rlimits are not set, the container will inherit rlimits from the parent process - Rlimits []configs.Rlimit - - // ConsoleSocket provides the masterfd console. - ConsoleSocket *os.File - - ops processOperations -} - -// Wait waits for the process to exit. 
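// An illustrative filling-in of the Process fields documented above, using a
// local stand-in struct; every value here is an example, not a default.
package sketch

import (
	"io"
	"os"
)

type processSpec struct {
	Args   []string
	Env    []string
	User   string
	Cwd    string
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

func exampleProcess() processSpec {
	return processSpec{
		Args:   []string{"/bin/sh", "-c", "echo hello"},
		Env:    []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"},
		User:   "1000:1000", // uid:gid resolved against the container's /etc/passwd
		Cwd:    "/",
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
}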
-// Wait releases any resources associated with the Process -func (p Process) Wait() (*os.ProcessState, error) { - if p.ops == nil { - return nil, newGenericError(fmt.Errorf("invalid process"), NoProcessOps) - } - return p.ops.wait() -} - -// Pid returns the process ID -func (p Process) Pid() (int, error) { - // math.MinInt32 is returned here, because it's invalid value - // for the kill() system call. - if p.ops == nil { - return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), NoProcessOps) - } - return p.ops.pid(), nil -} - -// Signal sends a signal to the Process. -func (p Process) Signal(sig os.Signal) error { - if p.ops == nil { - return newGenericError(fmt.Errorf("invalid process"), NoProcessOps) - } - return p.ops.signal(sig) -} - -// IO holds the process's STDIO -type IO struct { - Stdin io.WriteCloser - Stdout io.ReadCloser - Stderr io.ReadCloser -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go deleted file mode 100644 index bfe99551d..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go +++ /dev/null @@ -1,483 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "strconv" - "syscall" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runc/libcontainer/utils" -) - -type parentProcess interface { - // pid returns the pid for the running process. - pid() int - - // start starts the process execution. - start() error - - // send a SIGKILL to the process and wait for the exit. - terminate() error - - // wait waits on the process returning the process state. - wait() (*os.ProcessState, error) - - // startTime returns the process start time. - startTime() (string, error) - - signal(os.Signal) error - - externalDescriptors() []string - - setExternalDescriptors(fds []string) -} - -type setnsProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - cgroupPaths map[string]string - config *initConfig - fds []string - process *Process - bootstrapData io.Reader -} - -func (p *setnsProcess) startTime() (string, error) { - return system.GetProcessStartTime(p.pid()) -} - -func (p *setnsProcess) signal(sig os.Signal) error { - s, ok := sig.(syscall.Signal) - if !ok { - return errors.New("os: unsupported signal type") - } - return syscall.Kill(p.pid(), s) -} - -func (p *setnsProcess) start() (err error) { - defer p.parentPipe.Close() - err = p.cmd.Start() - p.childPipe.Close() - if err != nil { - return newSystemErrorWithCause(err, "starting setns process") - } - if p.bootstrapData != nil { - if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { - return newSystemErrorWithCause(err, "copying bootstrap data to pipe") - } - } - if err = p.execSetns(); err != nil { - return newSystemErrorWithCause(err, "executing setns process") - } - // We can't join cgroups if we're in a rootless container. 
- if !p.config.Rootless && len(p.cgroupPaths) > 0 { - if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil { - return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid()) - } - } - // set rlimits, this has to be done here because we lose permissions - // to raise the limits once we enter a user-namespace - if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { - return newSystemErrorWithCause(err, "setting rlimits for process") - } - if err := utils.WriteJSON(p.parentPipe, p.config); err != nil { - return newSystemErrorWithCause(err, "writing config to pipe") - } - - ierr := parseSync(p.parentPipe, func(sync *syncT) error { - switch sync.Type { - case procReady: - // This shouldn't happen. - panic("unexpected procReady in setns") - case procHooks: - // This shouldn't happen. - panic("unexpected procHooks in setns") - default: - return newSystemError(fmt.Errorf("invalid JSON payload from child")) - } - }) - - if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil { - return newSystemErrorWithCause(err, "calling shutdown on init pipe") - } - // Must be done after Shutdown so the child will exit and we can wait for it. - if ierr != nil { - p.wait() - return ierr - } - return nil -} - -// execSetns runs the process that executes C code to perform the setns calls -// because setns support requires the C process to fork off a child and perform the setns -// before the go runtime boots, we wait on the process to die and receive the child's pid -// over the provided pipe. -func (p *setnsProcess) execSetns() error { - status, err := p.cmd.Process.Wait() - if err != nil { - p.cmd.Wait() - return newSystemErrorWithCause(err, "waiting on setns process to finish") - } - if !status.Success() { - p.cmd.Wait() - return newSystemError(&exec.ExitError{ProcessState: status}) - } - var pid *pid - if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil { - p.cmd.Wait() - return newSystemErrorWithCause(err, "reading pid from init pipe") - } - process, err := os.FindProcess(pid.Pid) - if err != nil { - return err - } - p.cmd.Process = process - p.process.ops = p - return nil -} - -// terminate sends a SIGKILL to the forked process for the setns routine then waits to -// avoid the process becoming a zombie. 
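// The kill-then-reap idiom described above, in miniature: after Kill the
// child must still be waited on, or it lingers as a zombie; the wait error
// only takes over when the kill itself succeeded.
package sketch

import "os"

func killAndReap(p *os.Process) error {
	err := p.Kill()
	if _, werr := p.Wait(); err == nil {
		err = werr
	}
	return err
}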
-func (p *setnsProcess) terminate() error { - if p.cmd.Process == nil { - return nil - } - err := p.cmd.Process.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *setnsProcess) wait() (*os.ProcessState, error) { - err := p.cmd.Wait() - - // Return actual ProcessState even on Wait error - return p.cmd.ProcessState, err -} - -func (p *setnsProcess) pid() int { - return p.cmd.Process.Pid -} - -func (p *setnsProcess) externalDescriptors() []string { - return p.fds -} - -func (p *setnsProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -type initProcess struct { - cmd *exec.Cmd - parentPipe *os.File - childPipe *os.File - config *initConfig - manager cgroups.Manager - container *linuxContainer - fds []string - process *Process - bootstrapData io.Reader - sharePidns bool - rootDir *os.File -} - -func (p *initProcess) pid() int { - return p.cmd.Process.Pid -} - -func (p *initProcess) externalDescriptors() []string { - return p.fds -} - -// execSetns runs the process that executes C code to perform the setns calls -// because setns support requires the C process to fork off a child and perform the setns -// before the go runtime boots, we wait on the process to die and receive the child's pid -// over the provided pipe. -// This is called by initProcess.start function -func (p *initProcess) execSetns() error { - status, err := p.cmd.Process.Wait() - if err != nil { - p.cmd.Wait() - return err - } - if !status.Success() { - p.cmd.Wait() - return &exec.ExitError{ProcessState: status} - } - var pid *pid - if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil { - p.cmd.Wait() - return err - } - process, err := os.FindProcess(pid.Pid) - if err != nil { - return err - } - p.cmd.Process = process - p.process.ops = p - return nil -} - -func (p *initProcess) start() error { - defer p.parentPipe.Close() - err := p.cmd.Start() - p.process.ops = p - p.childPipe.Close() - p.rootDir.Close() - if err != nil { - p.process.ops = nil - return newSystemErrorWithCause(err, "starting init process command") - } - if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { - return newSystemErrorWithCause(err, "copying bootstrap data to pipe") - } - if err := p.execSetns(); err != nil { - return newSystemErrorWithCause(err, "running exec setns process for init") - } - // Save the standard descriptor names before the container process - // can potentially move them (e.g., via dup2()). If we don't do this now, - // we won't know at checkpoint time which file descriptor to look up. - fds, err := getPipeFds(p.pid()) - if err != nil { - return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid()) - } - p.setExternalDescriptors(fds) - // Do this before syncing with child so that no children can escape the - // cgroup. We don't need to worry about not doing this and not being root - // because we'd be using the rootless cgroup manager in that case. 
- if err := p.manager.Apply(p.pid()); err != nil { - return newSystemErrorWithCause(err, "applying cgroup configuration for process") - } - defer func() { - if err != nil { - // TODO: should not be the responsibility to call here - p.manager.Destroy() - } - }() - if err := p.createNetworkInterfaces(); err != nil { - return newSystemErrorWithCause(err, "creating network interfaces") - } - if err := p.sendConfig(); err != nil { - return newSystemErrorWithCause(err, "sending config to init process") - } - var ( - sentRun bool - sentResume bool - ) - - ierr := parseSync(p.parentPipe, func(sync *syncT) error { - switch sync.Type { - case procReady: - if err := p.manager.Set(p.config.Config); err != nil { - return newSystemErrorWithCause(err, "setting cgroup config for ready process") - } - // set rlimits, this has to be done here because we lose permissions - // to raise the limits once we enter a user-namespace - if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { - return newSystemErrorWithCause(err, "setting rlimits for ready process") - } - // call prestart hooks - if !p.config.Config.Namespaces.Contains(configs.NEWNS) { - if p.config.Config.Hooks != nil { - s := configs.HookState{ - Version: p.container.config.Version, - ID: p.container.id, - Pid: p.pid(), - Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"), - } - for i, hook := range p.config.Config.Hooks.Prestart { - if err := hook.Run(s); err != nil { - return newSystemErrorWithCausef(err, "running prestart hook %d", i) - } - } - } - } - // Sync with child. - if err := writeSync(p.parentPipe, procRun); err != nil { - return newSystemErrorWithCause(err, "writing syncT 'run'") - } - sentRun = true - case procHooks: - if p.config.Config.Hooks != nil { - s := configs.HookState{ - Version: p.container.config.Version, - ID: p.container.id, - Pid: p.pid(), - Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"), - } - for i, hook := range p.config.Config.Hooks.Prestart { - if err := hook.Run(s); err != nil { - return newSystemErrorWithCausef(err, "running prestart hook %d", i) - } - } - } - // Sync with child. - if err := writeSync(p.parentPipe, procResume); err != nil { - return newSystemErrorWithCause(err, "writing syncT 'resume'") - } - sentResume = true - default: - return newSystemError(fmt.Errorf("invalid JSON payload from child")) - } - - return nil - }) - - if !sentRun { - return newSystemErrorWithCause(ierr, "container init") - } - if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume { - return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process")) - } - if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil { - return newSystemErrorWithCause(err, "shutting down init pipe") - } - - // Must be done after Shutdown so the child will exit and we can wait for it. 
- if ierr != nil { - p.wait() - return ierr - } - return nil -} - -func (p *initProcess) wait() (*os.ProcessState, error) { - err := p.cmd.Wait() - if err != nil { - return p.cmd.ProcessState, err - } - // we should kill all processes in cgroup when init is died if we use host PID namespace - if p.sharePidns { - signalAllProcesses(p.manager, syscall.SIGKILL) - } - return p.cmd.ProcessState, nil -} - -func (p *initProcess) terminate() error { - if p.cmd.Process == nil { - return nil - } - err := p.cmd.Process.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *initProcess) startTime() (string, error) { - return system.GetProcessStartTime(p.pid()) -} - -func (p *initProcess) sendConfig() error { - // send the config to the container's init process, we don't use JSON Encode - // here because there might be a problem in JSON decoder in some cases, see: - // https://github.com/docker/docker/issues/14203#issuecomment-174177790 - return utils.WriteJSON(p.parentPipe, p.config) -} - -func (p *initProcess) createNetworkInterfaces() error { - for _, config := range p.config.Config.Networks { - strategy, err := getStrategy(config.Type) - if err != nil { - return err - } - n := &network{ - Network: *config, - } - if err := strategy.create(n, p.pid()); err != nil { - return err - } - p.config.Networks = append(p.config.Networks, n) - } - return nil -} - -func (p *initProcess) signal(sig os.Signal) error { - s, ok := sig.(syscall.Signal) - if !ok { - return errors.New("os: unsupported signal type") - } - return syscall.Kill(p.pid(), s) -} - -func (p *initProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -func getPipeFds(pid int) ([]string, error) { - fds := make([]string, 3) - - dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd") - for i := 0; i < 3; i++ { - // XXX: This breaks if the path is not a valid symlink (which can - // happen in certain particularly unlucky mount namespace setups). - f := filepath.Join(dirPath, strconv.Itoa(i)) - target, err := os.Readlink(f) - if err != nil { - // Ignore permission errors, for rootless containers and other - // non-dumpable processes. if we can't get the fd for a particular - // file, there's not much we can do. - if os.IsPermission(err) { - continue - } - return fds, err - } - fds[i] = target - } - return fds, nil -} - -// InitializeIO creates pipes for use with the process's stdio and returns the -// opposite side for each. Do not use this if you want to have a pseudoterminal -// set up for you by libcontainer (TODO: fix that too). -// TODO: This is mostly unnecessary, and should be handled by clients. 
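// A reduced sketch of the pipe plumbing InitializeIO performs below: one
// os.Pipe per stream, with the parent keeping the opposite end of each.
// Error-path cleanup and the user-namespace Fchown are omitted for brevity.
package sketch

import (
	"io"
	"os"
)

type stdioEnds struct {
	Stdin  io.WriteCloser // parent writes the child's stdin
	Stdout io.ReadCloser  // parent reads the child's stdout
	Stderr io.ReadCloser  // parent reads the child's stderr
}

func makeStdio() (child [3]*os.File, parent stdioEnds, err error) {
	inR, inW, err := os.Pipe()
	if err != nil {
		return child, parent, err
	}
	outR, outW, err := os.Pipe()
	if err != nil {
		return child, parent, err
	}
	errR, errW, err := os.Pipe()
	if err != nil {
		return child, parent, err
	}
	child = [3]*os.File{inR, outW, errW}
	parent = stdioEnds{Stdin: inW, Stdout: outR, Stderr: errR}
	return child, parent, nil
}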
-func (p *Process) InitializeIO(rootuid, rootgid int) (i *IO, err error) { - var fds []uintptr - i = &IO{} - // cleanup in case of an error - defer func() { - if err != nil { - for _, fd := range fds { - syscall.Close(int(fd)) - } - } - }() - // STDIN - r, w, err := os.Pipe() - if err != nil { - return nil, err - } - fds = append(fds, r.Fd(), w.Fd()) - p.Stdin, i.Stdin = r, w - // STDOUT - if r, w, err = os.Pipe(); err != nil { - return nil, err - } - fds = append(fds, r.Fd(), w.Fd()) - p.Stdout, i.Stdout = w, r - // STDERR - if r, w, err = os.Pipe(); err != nil { - return nil, err - } - fds = append(fds, r.Fd(), w.Fd()) - p.Stderr, i.Stderr = w, r - // change ownership of the pipes incase we are in a user namespace - for _, fd := range fds { - if err := syscall.Fchown(int(fd), rootuid, rootgid); err != nil { - return nil, err - } - } - return i, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go b/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go deleted file mode 100644 index a96f4ca5f..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go +++ /dev/null @@ -1,122 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - - "github.com/opencontainers/runc/libcontainer/system" -) - -func newRestoredProcess(pid int, fds []string) (*restoredProcess, error) { - var ( - err error - ) - proc, err := os.FindProcess(pid) - if err != nil { - return nil, err - } - started, err := system.GetProcessStartTime(pid) - if err != nil { - return nil, err - } - return &restoredProcess{ - proc: proc, - processStartTime: started, - fds: fds, - }, nil -} - -type restoredProcess struct { - proc *os.Process - processStartTime string - fds []string -} - -func (p *restoredProcess) start() error { - return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) -} - -func (p *restoredProcess) pid() int { - return p.proc.Pid -} - -func (p *restoredProcess) terminate() error { - err := p.proc.Kill() - if _, werr := p.wait(); err == nil { - err = werr - } - return err -} - -func (p *restoredProcess) wait() (*os.ProcessState, error) { - // TODO: how do we wait on the actual process? - // maybe use --exec-cmd in criu - st, err := p.proc.Wait() - if err != nil { - return nil, err - } - return st, nil -} - -func (p *restoredProcess) startTime() (string, error) { - return p.processStartTime, nil -} - -func (p *restoredProcess) signal(s os.Signal) error { - return p.proc.Signal(s) -} - -func (p *restoredProcess) externalDescriptors() []string { - return p.fds -} - -func (p *restoredProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} - -// nonChildProcess represents a process where the calling process is not -// the parent process. This process is created when a factory loads a container from -// a persisted state. 
-type nonChildProcess struct { - processPid int - processStartTime string - fds []string -} - -func (p *nonChildProcess) start() error { - return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) -} - -func (p *nonChildProcess) pid() int { - return p.processPid -} - -func (p *nonChildProcess) terminate() error { - return newGenericError(fmt.Errorf("restored process cannot be terminated"), SystemError) -} - -func (p *nonChildProcess) wait() (*os.ProcessState, error) { - return nil, newGenericError(fmt.Errorf("restored process cannot be waited on"), SystemError) -} - -func (p *nonChildProcess) startTime() (string, error) { - return p.processStartTime, nil -} - -func (p *nonChildProcess) signal(s os.Signal) error { - proc, err := os.FindProcess(p.processPid) - if err != nil { - return err - } - return proc.Signal(s) -} - -func (p *nonChildProcess) externalDescriptors() []string { - return p.fds -} - -func (p *nonChildProcess) setExternalDescriptors(newFds []string) { - p.fds = newFds -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go deleted file mode 100644 index d507373fa..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ /dev/null @@ -1,812 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/symlink" - "github.com/mrunalp/fileutils" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/system" - libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" - "github.com/opencontainers/selinux/go-selinux/label" -) - -const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV - -// needsSetupDev returns true if /dev needs to be set up. -func needsSetupDev(config *configs.Config) bool { - for _, m := range config.Mounts { - if m.Device == "bind" && libcontainerUtils.CleanPath(m.Destination) == "/dev" { - return false - } - } - return true -} - -// prepareRootfs sets up the devices, mount points, and filesystems for use -// inside a new mount namespace. It doesn't set anything as ro or pivot_root, -// because console setup happens inside the caller. You must call -// finalizeRootfs in order to finish the rootfs setup. 
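// The two-phase contract described above, sketched with hypothetical stubs;
// only the call order is the point: prepare the rootfs, let the caller do
// console setup while the old root is still reachable, then finalize.
package sketch

func prepare() error      { return nil } // mounts, devices, pivot_root
func consoleSetup() error { return nil } // caller-owned, between the phases
func finalize() error     { return nil } // read-only remounts, umask

func setupRootfs() error {
	if err := prepare(); err != nil {
		return err
	}
	if err := consoleSetup(); err != nil {
		return err
	}
	return finalize()
}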
-func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { - if err := prepareRoot(config); err != nil { - return newSystemErrorWithCause(err, "preparing rootfs") - } - - setupDev := needsSetupDev(config) - for _, m := range config.Mounts { - for _, precmd := range m.PremountCmds { - if err := mountCmd(precmd); err != nil { - return newSystemErrorWithCause(err, "running premount command") - } - } - - if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil { - return newSystemErrorWithCausef(err, "mounting %q to rootfs %q at %q", m.Source, config.Rootfs, m.Destination) - } - - for _, postcmd := range m.PostmountCmds { - if err := mountCmd(postcmd); err != nil { - return newSystemErrorWithCause(err, "running postmount command") - } - } - } - - if setupDev { - if err := createDevices(config); err != nil { - return newSystemErrorWithCause(err, "creating device nodes") - } - if err := setupPtmx(config); err != nil { - return newSystemErrorWithCause(err, "setting up ptmx") - } - if err := setupDevSymlinks(config.Rootfs); err != nil { - return newSystemErrorWithCause(err, "setting up /dev symlinks") - } - } - - // Signal the parent to run the pre-start hooks. - // The hooks are run after the mounts are setup, but before we switch to the new - // root, so that the old root is still available in the hooks for any mount - // manipulations. - if err := syncParentHooks(pipe); err != nil { - return err - } - - // The reason these operations are done here rather than in finalizeRootfs - // is because the console-handling code gets quite sticky if we have to set - // up the console before doing the pivot_root(2). This is because the - // Console API has to also work with the ExecIn case, which means that the - // API must be able to deal with being inside as well as outside the - // container. It's just cleaner to do this here (at the expense of the - // operation not being perfectly split). - - if err := syscall.Chdir(config.Rootfs); err != nil { - return newSystemErrorWithCausef(err, "changing dir to %q", config.Rootfs) - } - - if config.NoPivotRoot { - err = msMoveRoot(config.Rootfs) - } else { - err = pivotRoot(config.Rootfs) - } - if err != nil { - return newSystemErrorWithCause(err, "jailing process inside rootfs") - } - - if setupDev { - if err := reOpenDevNull(); err != nil { - return newSystemErrorWithCause(err, "reopening /dev/null inside container") - } - } - - return nil -} - -// finalizeRootfs actually switches the root of the process and sets anything -// to ro if necessary. You must call prepareRootfs first. -func finalizeRootfs(config *configs.Config) (err error) { - // remount dev as ro if specified - for _, m := range config.Mounts { - if libcontainerUtils.CleanPath(m.Destination) == "/dev" { - if m.Flags&syscall.MS_RDONLY == syscall.MS_RDONLY { - if err := remountReadonly(m); err != nil { - return newSystemErrorWithCausef(err, "remounting %q as readonly", m.Destination) - } - } - break - } - } - - // set rootfs ( / ) as readonly - if config.Readonlyfs { - if err := setReadonly(); err != nil { - return newSystemErrorWithCause(err, "setting rootfs as readonly") - } - } - - syscall.Umask(0022) - return nil -} - -func mountCmd(cmd configs.Command) error { - command := exec.Command(cmd.Path, cmd.Args[:]...) 
- command.Env = cmd.Env - command.Dir = cmd.Dir - if out, err := command.CombinedOutput(); err != nil { - return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err) - } - return nil -} - -func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { - var ( - dest = m.Destination - ) - if !strings.HasPrefix(dest, rootfs) { - dest = filepath.Join(rootfs, dest) - } - - switch m.Device { - case "proc", "sysfs": - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - // Selinux kernels do not support labeling of /proc or /sys - return mountPropagate(m, rootfs, "") - case "mqueue": - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - if err := mountPropagate(m, rootfs, mountLabel); err != nil { - // older kernels do not support labeling of /dev/mqueue - if err := mountPropagate(m, rootfs, ""); err != nil { - return err - } - return label.SetFileLabel(dest, mountLabel) - } - return nil - case "tmpfs": - copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP - tmpDir := "" - stat, err := os.Stat(dest) - if err != nil { - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - } - if copyUp { - tmpDir, err = ioutil.TempDir("/tmp", "runctmpdir") - if err != nil { - return newSystemErrorWithCause(err, "tmpcopyup: failed to create tmpdir") - } - defer os.RemoveAll(tmpDir) - m.Destination = tmpDir - } - if err := mountPropagate(m, rootfs, mountLabel); err != nil { - return err - } - if copyUp { - if err := fileutils.CopyDirectory(dest, tmpDir); err != nil { - errMsg := fmt.Errorf("tmpcopyup: failed to copy %s to %s: %v", dest, tmpDir, err) - if err1 := syscall.Unmount(tmpDir, syscall.MNT_DETACH); err1 != nil { - return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg) - } - return errMsg - } - if err := syscall.Mount(tmpDir, dest, "", syscall.MS_MOVE, ""); err != nil { - errMsg := fmt.Errorf("tmpcopyup: failed to move mount %s to %s: %v", tmpDir, dest, err) - if err1 := syscall.Unmount(tmpDir, syscall.MNT_DETACH); err1 != nil { - return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg) - } - return errMsg - } - } - if stat != nil { - if err = os.Chmod(dest, stat.Mode()); err != nil { - return err - } - } - return nil - case "bind": - stat, err := os.Stat(m.Source) - if err != nil { - // error out if the source of a bind mount does not exist as we will be - // unable to bind anything to it. - return err - } - // ensure that the destination of the bind mount is resolved of symlinks at mount time because - // any previous mounts can invalidate the next mount's destination. - // this can happen when a user specifies mounts within other mounts to cause breakouts or other - // evil stuff to try to escape the container's rootfs. - if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { - return err - } - if err := checkMountDestination(rootfs, dest); err != nil { - return err - } - // update the mount with the correct dest after symlinks are resolved. - m.Destination = dest - if err := createIfNotExists(dest, stat.IsDir()); err != nil { - return err - } - if err := mountPropagate(m, rootfs, mountLabel); err != nil { - return err - } - // bind mount won't change mount options, we need remount to make mount options effective. 
- // first check that we have non-default options required before attempting a remount - if m.Flags&^(syscall.MS_REC|syscall.MS_REMOUNT|syscall.MS_BIND) != 0 { - // only remount if unique mount options are set - if err := remount(m, rootfs); err != nil { - return err - } - } - - if m.Relabel != "" { - if err := label.Validate(m.Relabel); err != nil { - return err - } - shared := label.IsShared(m.Relabel) - if err := label.Relabel(m.Source, mountLabel, shared); err != nil { - return err - } - } - case "cgroup": - binds, err := getCgroupMounts(m) - if err != nil { - return err - } - var merged []string - for _, b := range binds { - ss := filepath.Base(b.Destination) - if strings.Contains(ss, ",") { - merged = append(merged, ss) - } - } - tmpfs := &configs.Mount{ - Source: "tmpfs", - Device: "tmpfs", - Destination: m.Destination, - Flags: defaultMountFlags, - Data: "mode=755", - PropagationFlags: m.PropagationFlags, - } - if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil { - return err - } - for _, b := range binds { - if err := mountToRootfs(b, rootfs, mountLabel); err != nil { - return err - } - } - for _, mc := range merged { - for _, ss := range strings.Split(mc, ",") { - // symlink(2) is very dumb, it will just shove the path into - // the link and doesn't do any checks or relative path - // conversion. Also, don't error out if the cgroup already exists. - if err := os.Symlink(mc, filepath.Join(rootfs, m.Destination, ss)); err != nil && !os.IsExist(err) { - return err - } - } - } - if m.Flags&syscall.MS_RDONLY != 0 { - // remount cgroup root as readonly - mcgrouproot := &configs.Mount{ - Source: m.Destination, - Device: "bind", - Destination: m.Destination, - Flags: defaultMountFlags | syscall.MS_RDONLY | syscall.MS_BIND, - } - if err := remount(mcgrouproot, rootfs); err != nil { - return err - } - } - default: - // ensure that the destination of the mount is resolved of symlinks at mount time because - // any previous mounts can invalidate the next mount's destination. - // this can happen when a user specifies mounts within other mounts to cause breakouts or other - // evil stuff to try to escape the container's rootfs. - var err error - if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { - return err - } - if err := checkMountDestination(rootfs, dest); err != nil { - return err - } - // update the mount with the correct dest after symlinks are resolved. - m.Destination = dest - if err := os.MkdirAll(dest, 0755); err != nil { - return err - } - return mountPropagate(m, rootfs, mountLabel) - } - return nil -} - -func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) { - mounts, err := cgroups.GetCgroupMounts(false) - if err != nil { - return nil, err - } - - cgroupPaths, err := cgroups.ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return nil, err - } - - var binds []*configs.Mount - - for _, mm := range mounts { - dir, err := mm.GetOwnCgroup(cgroupPaths) - if err != nil { - return nil, err - } - relDir, err := filepath.Rel(mm.Root, dir) - if err != nil { - return nil, err - } - binds = append(binds, &configs.Mount{ - Device: "bind", - Source: filepath.Join(mm.Mountpoint, relDir), - Destination: filepath.Join(m.Destination, filepath.Base(mm.Mountpoint)), - Flags: syscall.MS_BIND | syscall.MS_REC | m.Flags, - PropagationFlags: m.PropagationFlags, - }) - } - - return binds, nil -} - -// checkMountDestination checks to ensure that the mount destination is not over the top of /proc. 
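// The containment test checkMountDestination builds on below, isolated: a
// destination is inside a base directory when filepath.Rel(base, dest)
// yields "." or a path that does not climb out through "..". Both inputs
// must already be absolute with symlinks resolved for this to be meaningful.
package sketch

import (
	"path/filepath"
	"strings"
)

func isInside(base, dest string) (bool, error) {
	rel, err := filepath.Rel(base, dest)
	if err != nil {
		return false, err
	}
	return rel == "." || !strings.HasPrefix(rel, ".."), nil
}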
-// dest is required to be an abs path and have any symlinks resolved before calling this function. -func checkMountDestination(rootfs, dest string) error { - invalidDestinations := []string{ - "/proc", - } - // White list, it should be sub directories of invalid destinations - validDestinations := []string{ - // These entries can be bind mounted by files emulated by fuse, - // so commands like top, free displays stats in container. - "/proc/cpuinfo", - "/proc/diskstats", - "/proc/meminfo", - "/proc/stat", - "/proc/swaps", - "/proc/uptime", - "/proc/net/dev", - } - for _, valid := range validDestinations { - path, err := filepath.Rel(filepath.Join(rootfs, valid), dest) - if err != nil { - return err - } - if path == "." { - return nil - } - } - for _, invalid := range invalidDestinations { - path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest) - if err != nil { - return err - } - if path == "." || !strings.HasPrefix(path, "..") { - return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid) - } - } - return nil -} - -func setupDevSymlinks(rootfs string) error { - var links = [][2]string{ - {"/proc/self/fd", "/dev/fd"}, - {"/proc/self/fd/0", "/dev/stdin"}, - {"/proc/self/fd/1", "/dev/stdout"}, - {"/proc/self/fd/2", "/dev/stderr"}, - } - // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink - // in /dev if it exists in /proc. - if _, err := os.Stat("/proc/kcore"); err == nil { - links = append(links, [2]string{"/proc/kcore", "/dev/core"}) - } - for _, link := range links { - var ( - src = link[0] - dst = filepath.Join(rootfs, link[1]) - ) - if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { - return fmt.Errorf("symlink %s %s %s", src, dst, err) - } - } - return nil -} - -// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs -// this method will make them point to `/dev/null` in this container's rootfs. This -// needs to be called after we chroot/pivot into the container's rootfs so that any -// symlinks are resolved locally. -func reOpenDevNull() error { - var stat, devNullStat syscall.Stat_t - file, err := os.OpenFile("/dev/null", os.O_RDWR, 0) - if err != nil { - return fmt.Errorf("Failed to open /dev/null - %s", err) - } - defer file.Close() - if err := syscall.Fstat(int(file.Fd()), &devNullStat); err != nil { - return err - } - for fd := 0; fd < 3; fd++ { - if err := syscall.Fstat(fd, &stat); err != nil { - return err - } - if stat.Rdev == devNullStat.Rdev { - // Close and re-open the fd. - if err := syscall.Dup3(int(file.Fd()), fd, 0); err != nil { - return err - } - } - } - return nil -} - -// Create the device nodes in the container. -func createDevices(config *configs.Config) error { - useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER) - oldMask := syscall.Umask(0000) - for _, node := range config.Devices { - // containers running in a user namespace are not allowed to mknod - // devices so we can just bind mount it from the host. - if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil { - syscall.Umask(oldMask) - return err - } - } - syscall.Umask(oldMask) - return nil -} - -func bindMountDeviceNode(dest string, node *configs.Device) error { - f, err := os.Create(dest) - if err != nil && !os.IsExist(err) { - return err - } - if f != nil { - f.Close() - } - return syscall.Mount(node.Path, dest, "bind", syscall.MS_BIND, "") -} - -// Creates the device node in the rootfs of the container. 
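// A standalone mknod in the spirit of mknodDevice below, creating a
// character device such as /dev/null (major 1, minor 3). The (major<<8)|minor
// packing is the classic dev_t encoding and only valid for small device
// numbers; mknod also needs CAP_MKNOD, which is why the code below falls
// back to bind mounting inside user namespaces.
package sketch

import "syscall"

func mknodNull(dest string) error {
	dev := (1 << 8) | 3 // major 1, minor 3
	return syscall.Mknod(dest, syscall.S_IFCHR|0666, dev)
}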
-func createDeviceNode(rootfs string, node *configs.Device, bind bool) error { - dest := filepath.Join(rootfs, node.Path) - if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil { - return err - } - - if bind { - return bindMountDeviceNode(dest, node) - } - if err := mknodDevice(dest, node); err != nil { - if os.IsExist(err) { - return nil - } else if os.IsPermission(err) { - return bindMountDeviceNode(dest, node) - } - return err - } - return nil -} - -func mknodDevice(dest string, node *configs.Device) error { - fileMode := node.FileMode - switch node.Type { - case 'c', 'u': - fileMode |= syscall.S_IFCHR - case 'b': - fileMode |= syscall.S_IFBLK - case 'p': - fileMode |= syscall.S_IFIFO - default: - return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) - } - if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil { - return err - } - return syscall.Chown(dest, int(node.Uid), int(node.Gid)) -} - -func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { - for _, m := range mountinfo { - if m.Mountpoint == dir { - return m - } - } - return nil -} - -// Get the parent mount point of directory passed in as argument. Also return -// optional fields. -func getParentMount(rootfs string) (string, string, error) { - var path string - - mountinfos, err := mount.GetMounts() - if err != nil { - return "", "", err - } - - mountinfo := getMountInfo(mountinfos, rootfs) - if mountinfo != nil { - return rootfs, mountinfo.Optional, nil - } - - path = rootfs - for { - path = filepath.Dir(path) - - mountinfo = getMountInfo(mountinfos, path) - if mountinfo != nil { - return path, mountinfo.Optional, nil - } - - if path == "/" { - break - } - } - - // If we are here, we did not find parent mount. Something is wrong. - return "", "", fmt.Errorf("Could not find parent mount of %s", rootfs) -} - -// Make parent mount private if it was shared -func rootfsParentMountPrivate(rootfs string) error { - sharedMount := false - - parentMount, optionalOpts, err := getParentMount(rootfs) - if err != nil { - return err - } - - optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } - } - - // Make parent mount PRIVATE if it was shared. It is needed for two - // reasons. First of all pivot_root() will fail if parent mount is - // shared. Secondly when we bind mount rootfs it will propagate to - // parent namespace and we don't want that to happen. - if sharedMount { - return syscall.Mount("", parentMount, "", syscall.MS_PRIVATE, "") - } - - return nil -} - -func prepareRoot(config *configs.Config) error { - flag := syscall.MS_SLAVE | syscall.MS_REC - if config.RootPropagation != 0 { - flag = config.RootPropagation - } - if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil { - return err - } - - // Make parent mount private to make sure following bind mount does - // not propagate in other namespaces. Also it will help with kernel - // check pass in pivot_root. 
(IS_SHARED(new_mnt->mnt_parent)) - if err := rootfsParentMountPrivate(config.Rootfs); err != nil { - return err - } - - return syscall.Mount(config.Rootfs, config.Rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, "") -} - -func setReadonly() error { - return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") -} - -func setupPtmx(config *configs.Config) error { - ptmx := filepath.Join(config.Rootfs, "dev/ptmx") - if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { - return err - } - if err := os.Symlink("pts/ptmx", ptmx); err != nil { - return fmt.Errorf("symlink dev ptmx %s", err) - } - return nil -} - -// pivotRoot will call pivot_root such that rootfs becomes the new root -// filesystem, and everything else is cleaned up. -func pivotRoot(rootfs string) error { - // While the documentation may claim otherwise, pivot_root(".", ".") is - // actually valid. What this results in is / being the new root but - // /proc/self/cwd being the old root. Since we can play around with the cwd - // with pivot_root this allows us to pivot without creating directories in - // the rootfs. Shout-outs to the LXC developers for giving us this idea. - - oldroot, err := syscall.Open("/", syscall.O_DIRECTORY|syscall.O_RDONLY, 0) - if err != nil { - return err - } - defer syscall.Close(oldroot) - - newroot, err := syscall.Open(rootfs, syscall.O_DIRECTORY|syscall.O_RDONLY, 0) - if err != nil { - return err - } - defer syscall.Close(newroot) - - // Change to the new root so that the pivot_root actually acts on it. - if err := syscall.Fchdir(newroot); err != nil { - return err - } - - if err := syscall.PivotRoot(".", "."); err != nil { - return fmt.Errorf("pivot_root %s", err) - } - - // Currently our "." is oldroot (according to the current kernel code). - // However, purely for safety, we will fchdir(oldroot) since there isn't - // really any guarantee from the kernel what /proc/self/cwd will be after a - // pivot_root(2). - - if err := syscall.Fchdir(oldroot); err != nil { - return err - } - - // Make oldroot rprivate to make sure our unmounts don't propagate to the - // host (and thus bork the machine). - if err := syscall.Mount("", ".", "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { - return err - } - // Preform the unmount. MNT_DETACH allows us to unmount /proc/self/cwd. - if err := syscall.Unmount(".", syscall.MNT_DETACH); err != nil { - return err - } - - // Switch back to our shiny new root. - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("chdir / %s", err) - } - return nil -} - -func msMoveRoot(rootfs string) error { - if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { - return err - } - if err := syscall.Chroot("."); err != nil { - return err - } - return syscall.Chdir("/") -} - -// createIfNotExists creates a file or a directory only if it does not already exist. -func createIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} - -// readonlyPath will make a path read only. 
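// The two-step recipe readonlyPath implements below, on its own: mount flags
// like MS_RDONLY are ignored when a bind mount is first created, so the path
// is bind mounted over itself and then remounted with the read-only flag.
package sketch

import "syscall"

func makeReadonly(path string) error {
	if err := syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
		return err
	}
	return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "")
}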
-func readonlyPath(path string) error { - if err := syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") -} - -// remountReadonly will remount an existing mount point and ensure that it is read-only. -func remountReadonly(m *configs.Mount) error { - var ( - dest = m.Destination - flags = m.Flags - ) - for i := 0; i < 5; i++ { - if err := syscall.Mount("", dest, "", uintptr(flags|syscall.MS_REMOUNT|syscall.MS_RDONLY), ""); err != nil { - switch err { - case syscall.EBUSY: - time.Sleep(100 * time.Millisecond) - continue - default: - return err - } - } - return nil - } - return fmt.Errorf("unable to mount %s as readonly max retries reached", dest) -} - -// maskPath masks the top of the specified path inside a container to avoid -// security issues from processes reading information from non-namespace aware -// mounts ( proc/kcore ). -// For files, maskPath bind mounts /dev/null over the top of the specified path. -// For directories, maskPath mounts read-only tmpfs over the top of the specified path. -func maskPath(path string) error { - if err := syscall.Mount("/dev/null", path, "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { - if err == syscall.ENOTDIR { - return syscall.Mount("tmpfs", path, "tmpfs", syscall.MS_RDONLY, "") - } - return err - } - return nil -} - -// writeSystemProperty writes the value to a path under /proc/sys as determined from the key. -// For e.g. net.ipv4.ip_forward translated to /proc/sys/net/ipv4/ip_forward. -func writeSystemProperty(key, value string) error { - keyPath := strings.Replace(key, ".", "/", -1) - return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644) -} - -func remount(m *configs.Mount, rootfs string) error { - var ( - dest = m.Destination - ) - if !strings.HasPrefix(dest, rootfs) { - dest = filepath.Join(rootfs, dest) - } - if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags|syscall.MS_REMOUNT), ""); err != nil { - return err - } - return nil -} - -// Do the mount operation followed by additional mounts required to take care -// of propagation flags. 
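// Propagation changes are separate mount(2) calls with only a target and a
// flag, which is what the loop at the end of mountPropagate below performs;
// this sketch marks one mount point recursively private so later mounts
// beneath it stop propagating to peer namespaces.
package sketch

import "syscall"

func makePrivate(target string) error {
	return syscall.Mount("", target, "", syscall.MS_PRIVATE|syscall.MS_REC, "")
}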
-func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error {
-	var (
-		dest  = m.Destination
-		data  = label.FormatMountLabel(m.Data, mountLabel)
-		flags = m.Flags
-	)
-	if libcontainerUtils.CleanPath(dest) == "/dev" {
-		flags &= ^syscall.MS_RDONLY
-	}
-
-	copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP
-	if !(copyUp || strings.HasPrefix(dest, rootfs)) {
-		dest = filepath.Join(rootfs, dest)
-	}
-
-	if err := syscall.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil {
-		return err
-	}
-
-	for _, pflag := range m.PropagationFlags {
-		if err := syscall.Mount("", dest, "", uintptr(pflag), ""); err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
deleted file mode 100644
index c7bdb605a..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux,go1.5
-
-package libcontainer
-
-import "syscall"
-
-// Set the GidMappingsEnableSetgroups member to true, so the process's
-// setgroups proc entry won't be set to 'deny' if GidMappings are set
-func enableSetgroups(sys *syscall.SysProcAttr) {
-	sys.GidMappingsEnableSetgroups = true
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
deleted file mode 100644
index 48cc0ae02..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// +build linux
-
-package libcontainer
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/opencontainers/runc/libcontainer/apparmor"
-	"github.com/opencontainers/runc/libcontainer/keys"
-	"github.com/opencontainers/runc/libcontainer/seccomp"
-	"github.com/opencontainers/runc/libcontainer/system"
-	"github.com/opencontainers/selinux/go-selinux/label"
-)
-
-// linuxSetnsInit performs the container's initialization for running a new process
-// inside an existing container.
-type linuxSetnsInit struct { - pipe *os.File - consoleSocket *os.File - config *initConfig -} - -func (l *linuxSetnsInit) getSessionRingName() string { - return fmt.Sprintf("_ses.%s", l.config.ContainerId) -} - -func (l *linuxSetnsInit) Init() error { - if !l.config.Config.NoNewKeyring { - // do not inherit the parent's session keyring - if _, err := keys.JoinSessionKeyring(l.getSessionRingName()); err != nil { - return err - } - } - if l.config.CreateConsole { - if err := setupConsole(l.consoleSocket, l.config, false); err != nil { - return err - } - if err := system.Setctty(); err != nil { - return err - } - } - if l.config.NoNewPrivileges { - if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil { - return err - } - } - if l.config.Config.Seccomp != nil { - if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { - return err - } - } - if err := finalizeNamespace(l.config); err != nil { - return err - } - if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil { - return err - } - if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { - return err - } - return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go deleted file mode 100644 index ee6f19a72..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +++ /dev/null @@ -1,190 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - "os/exec" - "syscall" - - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/keys" - "github.com/opencontainers/runc/libcontainer/seccomp" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/selinux/go-selinux/label" -) - -type linuxStandardInit struct { - pipe *os.File - consoleSocket *os.File - parentPid int - stateDirFD int - config *initConfig -} - -func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) { - var newperms uint32 - - if l.config.Config.Namespaces.Contains(configs.NEWUSER) { - // with user ns we need 'other' search permissions - newperms = 0x8 - } else { - // without user ns we need 'UID' search permissions - newperms = 0x80000 - } - - // create a unique per session container name that we can - // join in setns; however, other containers can also join it - return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms -} - -// PR_SET_NO_NEW_PRIVS isn't exposed in Golang so we define it ourselves copying the value -// the kernel -const PR_SET_NO_NEW_PRIVS = 0x26 - -func (l *linuxStandardInit) Init() error { - if !l.config.Config.NoNewKeyring { - ringname, keepperms, newperms := l.getSessionRingParams() - - // do not inherit the parent's session keyring - sessKeyId, err := keys.JoinSessionKeyring(ringname) - if err != nil { - return err - } - // make session keyring searcheable - if err := keys.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil { - return err - } - } - - if err := setupNetwork(l.config); err != nil { - return err - } - if err := setupRoute(l.config.Config); err != nil { - return err - } - - label.Init() - - // prepareRootfs() can be executed only for a new mount namespace. 
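
The `PR_SET_NO_NEW_PRIVS` constant defined above (0x26, i.e. 38) can be exercised directly through the raw prctl syscall. A Linux-only sketch that sets the bit and reads it back; `PR_GET_NO_NEW_PRIVS` (0x27) is an assumption taken from the kernel headers, not part of the vendored file:

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
)

const (
	prSetNoNewPrivs = 0x26 // PR_SET_NO_NEW_PRIVS, as in the code above
	prGetNoNewPrivs = 0x27 // PR_GET_NO_NEW_PRIVS, assumed from kernel headers
)

func main() {
	// Set the flag for this process; it is inherited by children and can
	// never be unset, which is why runc applies it as late as it can.
	if _, _, errno := syscall.Syscall6(syscall.SYS_PRCTL, prSetNoNewPrivs, 1, 0, 0, 0, 0); errno != 0 {
		panic(errno)
	}
	got, _, errno := syscall.Syscall6(syscall.SYS_PRCTL, prGetNoNewPrivs, 0, 0, 0, 0, 0)
	if errno != 0 {
		panic(errno)
	}
	fmt.Println("no_new_privs =", got) // prints 1
}
```
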
- if l.config.Config.Namespaces.Contains(configs.NEWNS) { - if err := prepareRootfs(l.pipe, l.config.Config); err != nil { - return err - } - } - - // Set up the console. This has to be done *before* we finalize the rootfs, - // but *after* we've given the user the chance to set up all of the mounts - // they wanted. - if l.config.CreateConsole { - if err := setupConsole(l.consoleSocket, l.config, true); err != nil { - return err - } - if err := system.Setctty(); err != nil { - return err - } - } - - // Finish the rootfs setup. - if l.config.Config.Namespaces.Contains(configs.NEWNS) { - if err := finalizeRootfs(l.config.Config); err != nil { - return err - } - } - - if hostname := l.config.Config.Hostname; hostname != "" { - if err := syscall.Sethostname([]byte(hostname)); err != nil { - return err - } - } - if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil { - return err - } - if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { - return err - } - - for key, value := range l.config.Config.Sysctl { - if err := writeSystemProperty(key, value); err != nil { - return err - } - } - for _, path := range l.config.Config.ReadonlyPaths { - if err := readonlyPath(path); err != nil { - return err - } - } - for _, path := range l.config.Config.MaskPaths { - if err := maskPath(path); err != nil { - return err - } - } - pdeath, err := system.GetParentDeathSignal() - if err != nil { - return err - } - if l.config.NoNewPrivileges { - if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil { - return err - } - } - // Tell our parent that we're ready to Execv. This must be done before the - // Seccomp rules have been applied, because we need to be able to read and - // write to a socket. - if err := syncParentReady(l.pipe); err != nil { - return err - } - // Without NoNewPrivileges seccomp is a privileged operation, so we need to - // do this before dropping capabilities; otherwise do it as late as possible - // just before execve so as few syscalls take place after it as possible. - if l.config.Config.Seccomp != nil && !l.config.NoNewPrivileges { - if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { - return err - } - } - if err := finalizeNamespace(l.config); err != nil { - return err - } - // finalizeNamespace can change user/group which clears the parent death - // signal, so we restore it here. - if err := pdeath.Restore(); err != nil { - return err - } - // compare the parent from the initial start of the init process and make sure that it did not change. - // if the parent changes that means it died and we were reparented to something else so we should - // just kill ourself and not cause problems for someone else. - if syscall.Getppid() != l.parentPid { - return syscall.Kill(syscall.Getpid(), syscall.SIGKILL) - } - // check for the arg before waiting to make sure it exists and it is returned - // as a create time error. - name, err := exec.LookPath(l.config.Args[0]) - if err != nil { - return err - } - // close the pipe to signal that we have completed our init. - l.pipe.Close() - // wait for the fifo to be opened on the other side before - // exec'ing the users process. 
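
The `Openat` call that follows blocks because opening a FIFO for writing waits until someone opens it for reading — that single property implements the create/start split. A self-contained sketch of the rendezvous, with temp paths invented for the demo:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"
	"time"
)

func main() {
	dir, err := ioutil.TempDir("", "fifo-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	fifo := filepath.Join(dir, "exec.fifo")
	if err := syscall.Mkfifo(fifo, 0622); err != nil {
		panic(err)
	}

	// Stand-in for "runc start": open the read side after a delay.
	go func() {
		time.Sleep(500 * time.Millisecond)
		f, err := os.OpenFile(fifo, os.O_RDONLY, 0)
		if err != nil {
			return
		}
		defer f.Close()
		buf := make([]byte, 1)
		f.Read(buf)
		fmt.Println("reader got:", string(buf))
	}()

	// Stand-in for the container init: this open blocks until the reader
	// above shows up, just like the Openat on the exec fifo.
	start := time.Now()
	w, err := os.OpenFile(fifo, os.O_WRONLY, 0)
	if err != nil {
		panic(err)
	}
	defer w.Close()
	w.Write([]byte("0"))
	fmt.Println("write side unblocked after", time.Since(start))
	time.Sleep(100 * time.Millisecond) // let the reader print
}
```
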
- fd, err := syscall.Openat(l.stateDirFD, execFifoFilename, os.O_WRONLY|syscall.O_CLOEXEC, 0) - if err != nil { - return newSystemErrorWithCause(err, "openat exec fifo") - } - if _, err := syscall.Write(fd, []byte("0")); err != nil { - return newSystemErrorWithCause(err, "write 0 exec fifo") - } - if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges { - if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { - return newSystemErrorWithCause(err, "init seccomp") - } - } - // close the statedir fd before exec because the kernel resets dumpable in the wrong order - // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 - syscall.Close(l.stateDirFD) - if err := syscall.Exec(name, l.config.Args[0:], os.Environ()); err != nil { - return newSystemErrorWithCause(err, "exec user process") - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go deleted file mode 100644 index 62878acf0..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go +++ /dev/null @@ -1,247 +0,0 @@ -// +build linux - -package libcontainer - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/utils" -) - -func newStateTransitionError(from, to containerState) error { - return &stateTransitionError{ - From: from.status().String(), - To: to.status().String(), - } -} - -// stateTransitionError is returned when an invalid state transition happens from one -// state to another. -type stateTransitionError struct { - From string - To string -} - -func (s *stateTransitionError) Error() string { - return fmt.Sprintf("invalid state transition from %s to %s", s.From, s.To) -} - -type containerState interface { - transition(containerState) error - destroy() error - status() Status -} - -func destroy(c *linuxContainer) error { - if !c.config.Namespaces.Contains(configs.NEWPID) { - if err := signalAllProcesses(c.cgroupManager, syscall.SIGKILL); err != nil { - logrus.Warn(err) - } - } - err := c.cgroupManager.Destroy() - if rerr := os.RemoveAll(c.root); err == nil { - err = rerr - } - c.initProcess = nil - if herr := runPoststopHooks(c); err == nil { - err = herr - } - c.state = &stoppedState{c: c} - return err -} - -func runPoststopHooks(c *linuxContainer) error { - if c.config.Hooks != nil { - s := configs.HookState{ - Version: c.config.Version, - ID: c.id, - Bundle: utils.SearchLabels(c.config.Labels, "bundle"), - } - for _, hook := range c.config.Hooks.Poststop { - if err := hook.Run(s); err != nil { - return err - } - } - } - return nil -} - -// stoppedState represents a container is a stopped/destroyed state. -type stoppedState struct { - c *linuxContainer -} - -func (b *stoppedState) status() Status { - return Stopped -} - -func (b *stoppedState) transition(s containerState) error { - switch s.(type) { - case *runningState, *restoredState: - b.c.state = s - return nil - case *stoppedState: - return nil - } - return newStateTransitionError(b, s) -} - -func (b *stoppedState) destroy() error { - return destroy(b.c) -} - -// runningState represents a container that is currently running. 
-type runningState struct { - c *linuxContainer -} - -func (r *runningState) status() Status { - return Running -} - -func (r *runningState) transition(s containerState) error { - switch s.(type) { - case *stoppedState: - t, err := r.c.runType() - if err != nil { - return err - } - if t == Running { - return newGenericError(fmt.Errorf("container still running"), ContainerNotStopped) - } - r.c.state = s - return nil - case *pausedState: - r.c.state = s - return nil - case *runningState: - return nil - } - return newStateTransitionError(r, s) -} - -func (r *runningState) destroy() error { - t, err := r.c.runType() - if err != nil { - return err - } - if t == Running { - return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped) - } - return destroy(r.c) -} - -type createdState struct { - c *linuxContainer -} - -func (i *createdState) status() Status { - return Created -} - -func (i *createdState) transition(s containerState) error { - switch s.(type) { - case *runningState, *pausedState, *stoppedState: - i.c.state = s - return nil - case *createdState: - return nil - } - return newStateTransitionError(i, s) -} - -func (i *createdState) destroy() error { - i.c.initProcess.signal(syscall.SIGKILL) - return destroy(i.c) -} - -// pausedState represents a container that is currently pause. It cannot be destroyed in a -// paused state and must transition back to running first. -type pausedState struct { - c *linuxContainer -} - -func (p *pausedState) status() Status { - return Paused -} - -func (p *pausedState) transition(s containerState) error { - switch s.(type) { - case *runningState, *stoppedState: - p.c.state = s - return nil - case *pausedState: - return nil - } - return newStateTransitionError(p, s) -} - -func (p *pausedState) destroy() error { - t, err := p.c.runType() - if err != nil { - return err - } - if t != Running && t != Created { - if err := p.c.cgroupManager.Freeze(configs.Thawed); err != nil { - return err - } - return destroy(p.c) - } - return newGenericError(fmt.Errorf("container is paused"), ContainerPaused) -} - -// restoredState is the same as the running state but also has associated checkpoint -// information that maybe need destroyed when the container is stopped and destroy is called. -type restoredState struct { - imageDir string - c *linuxContainer -} - -func (r *restoredState) status() Status { - return Running -} - -func (r *restoredState) transition(s containerState) error { - switch s.(type) { - case *stoppedState, *runningState: - return nil - } - return newStateTransitionError(r, s) -} - -func (r *restoredState) destroy() error { - if _, err := os.Stat(filepath.Join(r.c.root, "checkpoint")); err != nil { - if !os.IsNotExist(err) { - return err - } - } - return destroy(r.c) -} - -// loadedState is used whenever a container is restored, loaded, or setting additional -// processes inside and it should not be destroyed when it is exiting. 
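
Every state above follows one shape: each status is its own type, and `transition` whitelists the statuses it may move to, funnelling everything else into `stateTransitionError`. A stripped-down sketch of the pattern — these three toy states are not runc's real types:

```go
package main

import "fmt"

type state interface {
	name() string
	transition(to state) error
}

type stopped struct{}
type running struct{}
type paused struct{}

func (stopped) name() string { return "stopped" }
func (running) name() string { return "running" }
func (paused) name() string  { return "paused" }

// A stopped container may run again (or stay stopped), but it cannot be
// paused directly; the type switch is the whitelist.
func (s stopped) transition(to state) error {
	switch to.(type) {
	case running, stopped:
		return nil
	}
	return fmt.Errorf("invalid state transition from %s to %s", s.name(), to.name())
}

func (r running) transition(to state) error {
	switch to.(type) {
	case stopped, paused, running:
		return nil
	}
	return fmt.Errorf("invalid state transition from %s to %s", r.name(), to.name())
}

func (p paused) transition(to state) error {
	switch to.(type) {
	case running, stopped, paused:
		return nil
	}
	return fmt.Errorf("invalid state transition from %s to %s", p.name(), to.name())
}

func main() {
	fmt.Println(stopped{}.transition(running{})) // <nil>
	fmt.Println(stopped{}.transition(paused{}))  // invalid state transition from stopped to paused
}
```
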
-type loadedState struct { - c *linuxContainer - s Status -} - -func (n *loadedState) status() Status { - return n.s -} - -func (n *loadedState) transition(s containerState) error { - n.c.state = s - return nil -} - -func (n *loadedState) destroy() error { - if err := n.c.refreshState(); err != nil { - return err - } - return n.c.state.destroy() -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/stats.go deleted file mode 100644 index 303e4b94c..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats.go +++ /dev/null @@ -1,15 +0,0 @@ -package libcontainer - -type NetworkInterface struct { - // Name is the name of the network interface. - Name string - - RxBytes uint64 - RxPackets uint64 - RxErrors uint64 - RxDropped uint64 - TxBytes uint64 - TxPackets uint64 - TxErrors uint64 - TxDropped uint64 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go deleted file mode 100644 index f8d1d689c..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go deleted file mode 100644 index c629dc67d..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go +++ /dev/null @@ -1,8 +0,0 @@ -package libcontainer - -import "github.com/opencontainers/runc/libcontainer/cgroups" - -type Stats struct { - Interfaces []*NetworkInterface - CgroupStats *cgroups.Stats -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go deleted file mode 100644 index da78c1c2e..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package libcontainer - -// Solaris - TODO - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go deleted file mode 100644 index f8d1d689c..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainer - -type Stats struct { - Interfaces []*NetworkInterface -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/sync.go b/vendor/github.com/opencontainers/runc/libcontainer/sync.go deleted file mode 100644 index cf7b45bc3..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/sync.go +++ /dev/null @@ -1,107 +0,0 @@ -package libcontainer - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/opencontainers/runc/libcontainer/utils" -) - -type syncType string - -// Constants that are used for synchronisation between the parent and child -// during container setup. They come in pairs (with procError being a generic -// response which is followed by a &genericError). 
-// -// [ child ] <-> [ parent ] -// -// procHooks --> [run hooks] -// <-- procResume -// -// procConsole --> -// <-- procConsoleReq -// [send(fd)] --> [recv(fd)] -// <-- procConsoleAck -// -// procReady --> [final setup] -// <-- procRun -const ( - procError syncType = "procError" - procReady syncType = "procReady" - procRun syncType = "procRun" - procHooks syncType = "procHooks" - procResume syncType = "procResume" -) - -type syncT struct { - Type syncType `json:"type"` -} - -// writeSync is used to write to a synchronisation pipe. An error is returned -// if there was a problem writing the payload. -func writeSync(pipe io.Writer, sync syncType) error { - if err := utils.WriteJSON(pipe, syncT{sync}); err != nil { - return err - } - return nil -} - -// readSync is used to read from a synchronisation pipe. An error is returned -// if we got a genericError, the pipe was closed, or we got an unexpected flag. -func readSync(pipe io.Reader, expected syncType) error { - var procSync syncT - if err := json.NewDecoder(pipe).Decode(&procSync); err != nil { - if err == io.EOF { - return fmt.Errorf("parent closed synchronisation channel") - } - - if procSync.Type == procError { - var ierr genericError - - if err := json.NewDecoder(pipe).Decode(&ierr); err != nil { - return fmt.Errorf("failed reading error from parent: %v", err) - } - - return &ierr - } - - if procSync.Type != expected { - return fmt.Errorf("invalid synchronisation flag from parent") - } - } - return nil -} - -// parseSync runs the given callback function on each syncT received from the -// child. It will return once io.EOF is returned from the given pipe. -func parseSync(pipe io.Reader, fn func(*syncT) error) error { - dec := json.NewDecoder(pipe) - for { - var sync syncT - if err := dec.Decode(&sync); err != nil { - if err == io.EOF { - break - } - return err - } - - // We handle this case outside fn for cleanliness reasons. - var ierr *genericError - if sync.Type == procError { - if err := dec.Decode(&ierr); err != nil && err != io.EOF { - return newSystemErrorWithCause(err, "decoding proc error from init") - } - if ierr != nil { - return ierr - } - // Programmer error. - panic("No error following JSON procError payload.") - } - - if err := fn(&sync); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/list.go b/vendor/github.com/opencontainers/runc/list.go deleted file mode 100644 index 1c3b9aa83..000000000 --- a/vendor/github.com/opencontainers/runc/list.go +++ /dev/null @@ -1,175 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "text/tabwriter" - "time" - - "encoding/json" - - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/urfave/cli" -) - -const formatOptions = `table or json` - -// containerState represents the platform agnostic pieces relating to a -// running container's status and state -type containerState struct { - // Version is the OCI version for the container - Version string `json:"ociVersion"` - // ID is the container ID - ID string `json:"id"` - // InitProcessPid is the init process id in the parent namespace - InitProcessPid int `json:"pid"` - // Status is the current status of the container, running, paused, ... 
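
The synchronisation helpers above exchange one JSON object per step over a pipe. A compressed sketch of that handshake, with in-memory pipes standing in for the real parent/child channel:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
)

// syncT mirrors the wire format above: a single typed JSON object per step.
type syncT struct {
	Type string `json:"type"`
}

func main() {
	toParentR, toParentW := io.Pipe() // child writes, parent reads
	toChildR, toChildW := io.Pipe()   // parent writes, child reads

	done := make(chan struct{})

	// Child side: report procReady, then block until the parent answers.
	go func() {
		defer close(done)
		json.NewEncoder(toParentW).Encode(syncT{"procReady"})
		var s syncT
		json.NewDecoder(toChildR).Decode(&s)
		fmt.Println("child received:", s.Type)
	}()

	// Parent side: wait for procReady, do the final setup, answer procRun.
	var s syncT
	json.NewDecoder(toParentR).Decode(&s)
	fmt.Println("parent received:", s.Type)
	json.NewEncoder(toChildW).Encode(syncT{"procRun"})
	<-done
}
```
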
- Status string `json:"status"` - // Bundle is the path on the filesystem to the bundle - Bundle string `json:"bundle"` - // Rootfs is a path to a directory containing the container's root filesystem. - Rootfs string `json:"rootfs"` - // Created is the unix timestamp for the creation time of the container in UTC - Created time.Time `json:"created"` - // Annotations is the user defined annotations added to the config. - Annotations map[string]string `json:"annotations,omitempty"` - // The owner of the state directory (the owner of the container). - Owner string `json:"owner"` -} - -var listCommand = cli.Command{ - Name: "list", - Usage: "lists containers started by runc with the given root", - ArgsUsage: ` - -Where the given root is specified via the global option "--root" -(default: "/run/runc"). - -EXAMPLE 1: -To list containers created via the default "--root": - # runc list - -EXAMPLE 2: -To list containers created using a non-default value for "--root": - # runc --root value list`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "format, f", - Value: "table", - Usage: `select one of: ` + formatOptions, - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "display only container IDs", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 0, exactArgs); err != nil { - return err - } - s, err := getContainers(context) - if err != nil { - return err - } - - if context.Bool("quiet") { - for _, item := range s { - fmt.Println(item.ID) - } - return nil - } - - switch context.String("format") { - case "table": - w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0) - fmt.Fprint(w, "ID\tPID\tSTATUS\tBUNDLE\tCREATED\tOWNER\n") - for _, item := range s { - fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n", - item.ID, - item.InitProcessPid, - item.Status, - item.Bundle, - item.Created.Format(time.RFC3339Nano), - item.Owner) - } - if err := w.Flush(); err != nil { - return err - } - case "json": - if err := json.NewEncoder(os.Stdout).Encode(s); err != nil { - return err - } - default: - return fmt.Errorf("invalid format option") - } - return nil - }, -} - -func getContainers(context *cli.Context) ([]containerState, error) { - factory, err := loadFactory(context) - if err != nil { - return nil, err - } - root := context.GlobalString("root") - absRoot, err := filepath.Abs(root) - if err != nil { - return nil, err - } - list, err := ioutil.ReadDir(absRoot) - if err != nil { - fatal(err) - } - - var s []containerState - for _, item := range list { - if item.IsDir() { - // This cast is safe on Linux. 
- stat := item.Sys().(*syscall.Stat_t) - owner, err := user.LookupUid(int(stat.Uid)) - if err != nil { - owner.Name = string(stat.Uid) - } - - container, err := factory.Load(item.Name()) - if err != nil { - fmt.Fprintf(os.Stderr, "load container %s: %v\n", item.Name(), err) - continue - } - containerStatus, err := container.Status() - if err != nil { - fmt.Fprintf(os.Stderr, "status for %s: %v\n", item.Name(), err) - continue - } - state, err := container.State() - if err != nil { - fmt.Fprintf(os.Stderr, "state for %s: %v\n", item.Name(), err) - continue - } - pid := state.BaseState.InitProcessPid - if containerStatus == libcontainer.Stopped { - pid = 0 - } - bundle, annotations := utils.Annotations(state.Config.Labels) - s = append(s, containerState{ - Version: state.BaseState.Config.Version, - ID: state.BaseState.ID, - InitProcessPid: pid, - Status: containerStatus.String(), - Bundle: bundle, - Rootfs: state.BaseState.Config.Rootfs, - Created: state.BaseState.Created, - Annotations: annotations, - Owner: owner.Name, - }) - } - } - return s, nil -} diff --git a/vendor/github.com/opencontainers/runc/main.go b/vendor/github.com/opencontainers/runc/main.go deleted file mode 100644 index 1cb8f4dbf..000000000 --- a/vendor/github.com/opencontainers/runc/main.go +++ /dev/null @@ -1,149 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -// version will be populated by the Makefile, read from -// VERSION file of the source code. -var version = "" - -// gitCommit will be the hash that the binary was built from -// and will be populated by the Makefile -var gitCommit = "" - -const ( - specConfig = "config.json" - usage = `Open Container Initiative runtime - -runc is a command line client for running applications packaged according to -the Open Container Initiative (OCI) format and is a compliant implementation of the -Open Container Initiative specification. - -runc integrates well with existing process supervisors to provide a production -container runtime environment for applications. It can be used with your -existing process monitoring tools and the container will be spawned as a -direct child of the process supervisor. - -Containers are configured using bundles. A bundle for a container is a directory -that includes a specification file named "` + specConfig + `" and a root filesystem. -The root filesystem contains the contents of the container. - -To start a new instance of a container: - - # runc run [ -b bundle ] - -Where "" is your name for the instance of the container that you -are starting. The name you provide for the container instance must be unique on -your host. Providing the bundle directory using "-b" is optional. 
The default -value for "bundle" is the current directory.` -) - -func main() { - app := cli.NewApp() - app.Name = "runc" - app.Usage = usage - - var v []string - if version != "" { - v = append(v, version) - } - if gitCommit != "" { - v = append(v, fmt.Sprintf("commit: %s", gitCommit)) - } - v = append(v, fmt.Sprintf("spec: %s", specs.Version)) - app.Version = strings.Join(v, "\n") - app.Flags = []cli.Flag{ - cli.BoolFlag{ - Name: "debug", - Usage: "enable debug output for logging", - }, - cli.StringFlag{ - Name: "log", - Value: "/dev/null", - Usage: "set the log file path where internal debug information is written", - }, - cli.StringFlag{ - Name: "log-format", - Value: "text", - Usage: "set the format used by logs ('text' (default), or 'json')", - }, - cli.StringFlag{ - Name: "root", - Value: "/run/runc", - Usage: "root directory for storage of container state (this should be located in tmpfs)", - }, - cli.StringFlag{ - Name: "criu", - Value: "criu", - Usage: "path to the criu binary used for checkpoint and restore", - }, - cli.BoolFlag{ - Name: "systemd-cgroup", - Usage: "enable systemd cgroup support, expects cgroupsPath to be of form \"slice:prefix:name\" for e.g. \"system.slice:runc:434234\"", - }, - } - app.Commands = []cli.Command{ - checkpointCommand, - createCommand, - deleteCommand, - eventsCommand, - execCommand, - initCommand, - killCommand, - listCommand, - pauseCommand, - psCommand, - restoreCommand, - resumeCommand, - runCommand, - specCommand, - startCommand, - stateCommand, - updateCommand, - } - app.Before = func(context *cli.Context) error { - if context.GlobalBool("debug") { - logrus.SetLevel(logrus.DebugLevel) - } - if path := context.GlobalString("log"); path != "" { - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666) - if err != nil { - return err - } - logrus.SetOutput(f) - } - switch context.GlobalString("log-format") { - case "text": - // retain logrus's default. - case "json": - logrus.SetFormatter(new(logrus.JSONFormatter)) - default: - return fmt.Errorf("unknown log-format %q", context.GlobalString("log-format")) - } - return nil - } - // If the command returns an error, cli takes upon itself to print - // the error on cli.ErrWriter and exit. - // Use our own writer here to ensure the log gets sent to the right location. 
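
The `FatalWriter` defined just below is a plain `io.Writer` decorator: bytes still reach the original destination, but are logged first. The same shape in miniature, stdlib only — `loggingWriter` is a name invented for this sketch:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

// loggingWriter decorates another io.Writer, recording everything that
// passes through before delegating — the same shape as FatalWriter.
type loggingWriter struct {
	next io.Writer
}

func (l *loggingWriter) Write(p []byte) (int, error) {
	log.Printf("intercepted: %q", p)
	return l.next.Write(p)
}

func main() {
	w := &loggingWriter{next: os.Stdout}
	fmt.Fprintln(w, "usage error goes here")
}
```
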
- cli.ErrWriter = &FatalWriter{cli.ErrWriter} - if err := app.Run(os.Args); err != nil { - fatal(err) - } -} - -type FatalWriter struct { - cliErrWriter io.Writer -} - -func (f *FatalWriter) Write(p []byte) (n int, err error) { - logrus.Error(string(p)) - return f.cliErrWriter.Write(p) -} diff --git a/vendor/github.com/opencontainers/runc/main_solaris.go b/vendor/github.com/opencontainers/runc/main_solaris.go deleted file mode 100644 index 3c365db14..000000000 --- a/vendor/github.com/opencontainers/runc/main_solaris.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build solaris - -package main - -import "github.com/urfave/cli" - -var ( - checkpointCommand cli.Command - eventsCommand cli.Command - restoreCommand cli.Command - specCommand cli.Command - killCommand cli.Command - deleteCommand cli.Command - execCommand cli.Command - initCommand cli.Command - listCommand cli.Command - pauseCommand cli.Command - resumeCommand cli.Command - startCommand cli.Command - stateCommand cli.Command -) diff --git a/vendor/github.com/opencontainers/runc/main_unix.go b/vendor/github.com/opencontainers/runc/main_unix.go deleted file mode 100644 index b601abc75..000000000 --- a/vendor/github.com/opencontainers/runc/main_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build linux - -package main - -import ( - "os" - "runtime" - - "github.com/opencontainers/runc/libcontainer" - _ "github.com/opencontainers/runc/libcontainer/nsenter" - "github.com/urfave/cli" -) - -func init() { - if len(os.Args) > 1 && os.Args[1] == "init" { - runtime.GOMAXPROCS(1) - runtime.LockOSThread() - } -} - -var initCommand = cli.Command{ - Name: "init", - Usage: `initialize the namespaces and launch the process (do not call it outside of runc)`, - Action: func(context *cli.Context) error { - factory, _ := libcontainer.New("") - if err := factory.StartInitialization(); err != nil { - // as the error is sent back to the parent there is no need to log - // or write it to stderr because the parent process will handle this - os.Exit(1) - } - panic("libcontainer: container init failed to exec") - }, -} diff --git a/vendor/github.com/opencontainers/runc/main_unsupported.go b/vendor/github.com/opencontainers/runc/main_unsupported.go deleted file mode 100644 index 4d69931a6..000000000 --- a/vendor/github.com/opencontainers/runc/main_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux,!solaris - -package main - -import "github.com/urfave/cli" - -var ( - checkpointCommand cli.Command - eventsCommand cli.Command - restoreCommand cli.Command - specCommand cli.Command - killCommand cli.Command -) diff --git a/vendor/github.com/opencontainers/runc/notify_socket.go b/vendor/github.com/opencontainers/runc/notify_socket.go deleted file mode 100644 index 2bd678893..000000000 --- a/vendor/github.com/opencontainers/runc/notify_socket.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build linux - -package main - -import ( - "bytes" - "fmt" - "net" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -type notifySocket struct { - socket *net.UnixConn - host string - socketPath string -} - -func newNotifySocket(context *cli.Context, notifySocketHost string, id string) *notifySocket { - if notifySocketHost == "" { - return nil - } - - root := filepath.Join(context.GlobalString("root"), id) - path := filepath.Join(root, "notify.sock") - - notifySocket := ¬ifySocket{ - socket: nil, - host: notifySocketHost, - socketPath: path, - } - - return notifySocket -} - -func (ns *notifySocket) Close() 
error { - return ns.socket.Close() -} - -// If systemd is supporting sd_notify protocol, this function will add support -// for sd_notify protocol from within the container. -func (s *notifySocket) setupSpec(context *cli.Context, spec *specs.Spec) { - mount := specs.Mount{Destination: s.host, Type: "bind", Source: s.socketPath, Options: []string{"bind"}} - spec.Mounts = append(spec.Mounts, mount) - spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", s.host)) -} - -func (s *notifySocket) setupSocket() error { - addr := net.UnixAddr{ - Name: s.socketPath, - Net: "unixgram", - } - - socket, err := net.ListenUnixgram("unixgram", &addr) - if err != nil { - return err - } - - s.socket = socket - return nil -} - -// pid1 must be set only with -d, as it is used to set the new process as the main process -// for the service in systemd -func (notifySocket *notifySocket) run(pid1 int) { - buf := make([]byte, 512) - notifySocketHostAddr := net.UnixAddr{Name: notifySocket.host, Net: "unixgram"} - client, err := net.DialUnix("unixgram", nil, ¬ifySocketHostAddr) - if err != nil { - logrus.Error(err) - return - } - for { - r, err := notifySocket.socket.Read(buf) - if err != nil { - break - } - var out bytes.Buffer - for _, line := range bytes.Split(buf[0:r], []byte{'\n'}) { - if bytes.HasPrefix(line, []byte("READY=")) { - _, err = out.Write(line) - if err != nil { - return - } - - _, err = out.Write([]byte{'\n'}) - if err != nil { - return - } - - _, err = client.Write(out.Bytes()) - if err != nil { - return - } - - // now we can inform systemd to use pid1 as the pid to monitor - if pid1 > 0 { - newPid := fmt.Sprintf("MAINPID=%d\n", pid1) - client.Write([]byte(newPid)) - } - return - } - } - } -} diff --git a/vendor/github.com/opencontainers/runc/pause.go b/vendor/github.com/opencontainers/runc/pause.go deleted file mode 100644 index 3b98dbbbf..000000000 --- a/vendor/github.com/opencontainers/runc/pause.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build linux - -package main - -import "github.com/urfave/cli" - -var pauseCommand = cli.Command{ - Name: "pause", - Usage: "pause suspends all processes inside the container", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -paused. `, - Description: `The pause command suspends all processes in the instance of the container. - -Use runc list to identiy instances of containers and their current status.`, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - container, err := getContainer(context) - if err != nil { - return err - } - if err := container.Pause(); err != nil { - return err - } - - return nil - }, -} - -var resumeCommand = cli.Command{ - Name: "resume", - Usage: "resumes all processes that have been previously paused", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -resumed.`, - Description: `The resume command resumes all processes in the instance of the container. 
- -Use runc list to identiy instances of containers and their current status.`, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - container, err := getContainer(context) - if err != nil { - return err - } - if err := container.Resume(); err != nil { - return err - } - - return nil - }, -} diff --git a/vendor/github.com/opencontainers/runc/ps.go b/vendor/github.com/opencontainers/runc/ps.go deleted file mode 100644 index 6e0c7376a..000000000 --- a/vendor/github.com/opencontainers/runc/ps.go +++ /dev/null @@ -1,109 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "fmt" - "os" - "os/exec" - "strconv" - "strings" - - "github.com/urfave/cli" -) - -var psCommand = cli.Command{ - Name: "ps", - Usage: "ps displays the processes running inside a container", - ArgsUsage: ` [ps options]`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "format, f", - Value: "table", - Usage: `select one of: ` + formatOptions, - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, minArgs); err != nil { - return err - } - // XXX: Currently not supported with rootless containers. - if isRootless() { - return fmt.Errorf("runc ps requires root") - } - - container, err := getContainer(context) - if err != nil { - return err - } - - pids, err := container.Processes() - if err != nil { - return err - } - - switch context.String("format") { - case "table": - case "json": - return json.NewEncoder(os.Stdout).Encode(pids) - default: - return fmt.Errorf("invalid format option") - } - - // [1:] is to remove command name, ex: - // context.Args(): [containet_id ps_arg1 ps_arg2 ...] - // psArgs: [ps_arg1 ps_arg2 ...] - // - psArgs := context.Args()[1:] - if len(psArgs) == 0 { - psArgs = []string{"-ef"} - } - - cmd := exec.Command("ps", psArgs...) 
- output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("%s: %s", err, output) - } - - lines := strings.Split(string(output), "\n") - pidIndex, err := getPidIndex(lines[0]) - if err != nil { - return err - } - - fmt.Println(lines[0]) - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := strings.Fields(line) - p, err := strconv.Atoi(fields[pidIndex]) - if err != nil { - return fmt.Errorf("unexpected pid '%s': %s", fields[pidIndex], err) - } - - for _, pid := range pids { - if pid == p { - fmt.Println(line) - break - } - } - } - return nil - }, - SkipArgReorder: true, -} - -func getPidIndex(title string) (int, error) { - titles := strings.Fields(title) - - pidIndex := -1 - for i, name := range titles { - if name == "PID" { - return i, nil - } - } - - return pidIndex, fmt.Errorf("couldn't find PID field in ps output") -} diff --git a/vendor/github.com/opencontainers/runc/restore.go b/vendor/github.com/opencontainers/runc/restore.go deleted file mode 100644 index 7ddc3373b..000000000 --- a/vendor/github.com/opencontainers/runc/restore.go +++ /dev/null @@ -1,216 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "os" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/specconv" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -var restoreCommand = cli.Command{ - Name: "restore", - Usage: "restore a container from a previous checkpoint", - ArgsUsage: ` - -Where "" is the name for the instance of the container to be -restored.`, - Description: `Restores the saved state of the container instance that was previously saved -using the runc checkpoint command.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "image-path", - Value: "", - Usage: "path to criu image files for restoring", - }, - cli.StringFlag{ - Name: "work-path", - Value: "", - Usage: "path for saving work files and logs", - }, - cli.BoolFlag{ - Name: "tcp-established", - Usage: "allow open tcp connections", - }, - cli.BoolFlag{ - Name: "ext-unix-sk", - Usage: "allow external unix sockets", - }, - cli.BoolFlag{ - Name: "shell-job", - Usage: "allow shell jobs", - }, - cli.BoolFlag{ - Name: "file-locks", - Usage: "handle file locks, for safety", - }, - cli.StringFlag{ - Name: "manage-cgroups-mode", - Value: "", - Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'", - }, - cli.StringFlag{ - Name: "bundle, b", - Value: "", - Usage: "path to the root of the bundle directory", - }, - cli.BoolFlag{ - Name: "detach,d", - Usage: "detach from the container's process", - }, - cli.StringFlag{ - Name: "pid-file", - Value: "", - Usage: "specify the file to write the process id to", - }, - cli.BoolFlag{ - Name: "no-subreaper", - Usage: "disable the use of the subreaper used to reap reparented processes", - }, - cli.BoolFlag{ - Name: "no-pivot", - Usage: "do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk", - }, - cli.StringSliceFlag{ - Name: "empty-ns", - Usage: "create a namespace, but don't restore its properties", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - // XXX: Currently this is untested with rootless containers. 
- if isRootless() { - return fmt.Errorf("runc restore requires root") - } - - imagePath := context.String("image-path") - id := context.Args().First() - if id == "" { - return errEmptyID - } - if imagePath == "" { - imagePath = getDefaultImagePath(context) - } - bundle := context.String("bundle") - if bundle != "" { - if err := os.Chdir(bundle); err != nil { - return err - } - } - spec, err := loadSpec(specConfig) - if err != nil { - return err - } - config, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{ - CgroupName: id, - UseSystemdCgroup: context.GlobalBool("systemd-cgroup"), - NoPivotRoot: context.Bool("no-pivot"), - Spec: spec, - }) - if err != nil { - return err - } - status, err := restoreContainer(context, spec, config, imagePath) - if err == nil { - os.Exit(status) - } - return err - }, -} - -func restoreContainer(context *cli.Context, spec *specs.Spec, config *configs.Config, imagePath string) (int, error) { - var ( - rootuid = 0 - rootgid = 0 - id = context.Args().First() - ) - factory, err := loadFactory(context) - if err != nil { - return -1, err - } - container, err := factory.Load(id) - if err != nil { - container, err = factory.Create(id, config) - if err != nil { - return -1, err - } - } - options := criuOptions(context) - - status, err := container.Status() - if err != nil { - logrus.Error(err) - } - if status == libcontainer.Running { - fatalf("Container with id %s already running", id) - } - - setManageCgroupsMode(context, options) - - if err = setEmptyNsMask(context, options); err != nil { - return -1, err - } - - // ensure that the container is always removed if we were the process - // that created it. - detach := context.Bool("detach") - if !detach { - defer destroy(container) - } - process := &libcontainer.Process{} - tty, err := setupIO(process, rootuid, rootgid, false, detach, "") - if err != nil { - return -1, err - } - - notifySocket := newNotifySocket(context, os.Getenv("NOTIFY_SOCKET"), id) - if notifySocket != nil { - notifySocket.setupSpec(context, spec) - notifySocket.setupSocket() - } - - handler := newSignalHandler(!context.Bool("no-subreaper"), notifySocket) - if err := container.Restore(process, options); err != nil { - return -1, err - } - // We don't need to do a tty.recvtty because config.Terminal is always false. 
- defer tty.Close() - if err := tty.ClosePostStart(); err != nil { - return -1, err - } - if pidFile := context.String("pid-file"); pidFile != "" { - if err := createPidFile(pidFile, process); err != nil { - _ = process.Signal(syscall.SIGKILL) - _, _ = process.Wait() - return -1, err - } - } - return handler.forward(process, tty, detach) -} - -func criuOptions(context *cli.Context) *libcontainer.CriuOpts { - imagePath := getCheckpointImagePath(context) - if err := os.MkdirAll(imagePath, 0655); err != nil { - fatal(err) - } - return &libcontainer.CriuOpts{ - ImagesDirectory: imagePath, - WorkDirectory: context.String("work-path"), - ParentImage: context.String("parent-path"), - LeaveRunning: context.Bool("leave-running"), - TcpEstablished: context.Bool("tcp-established"), - ExternalUnixConnections: context.Bool("ext-unix-sk"), - ShellJob: context.Bool("shell-job"), - FileLocks: context.Bool("file-locks"), - PreDump: context.Bool("pre-dump"), - } -} diff --git a/vendor/github.com/opencontainers/runc/rlimit_linux.go b/vendor/github.com/opencontainers/runc/rlimit_linux.go deleted file mode 100644 index c97a0fb4e..000000000 --- a/vendor/github.com/opencontainers/runc/rlimit_linux.go +++ /dev/null @@ -1,49 +0,0 @@ -package main - -import "fmt" - -const ( - RLIMIT_CPU = iota // CPU time in sec - RLIMIT_FSIZE // Maximum filesize - RLIMIT_DATA // max data size - RLIMIT_STACK // max stack size - RLIMIT_CORE // max core file size - RLIMIT_RSS // max resident set size - RLIMIT_NPROC // max number of processes - RLIMIT_NOFILE // max number of open files - RLIMIT_MEMLOCK // max locked-in-memory address space - RLIMIT_AS // address space limit - RLIMIT_LOCKS // maximum file locks held - RLIMIT_SIGPENDING // max number of pending signals - RLIMIT_MSGQUEUE // maximum bytes in POSIX mqueues - RLIMIT_NICE // max nice prio allowed to raise to - RLIMIT_RTPRIO // maximum realtime priority - RLIMIT_RTTIME // timeout for RT tasks in us -) - -var rlimitMap = map[string]int{ - "RLIMIT_CPU": RLIMIT_CPU, - "RLIMIT_FSIZE": RLIMIT_FSIZE, - "RLIMIT_DATA": RLIMIT_DATA, - "RLIMIT_STACK": RLIMIT_STACK, - "RLIMIT_CORE": RLIMIT_CORE, - "RLIMIT_RSS": RLIMIT_RSS, - "RLIMIT_NPROC": RLIMIT_NPROC, - "RLIMIT_NOFILE": RLIMIT_NOFILE, - "RLIMIT_MEMLOCK": RLIMIT_MEMLOCK, - "RLIMIT_AS": RLIMIT_AS, - "RLIMIT_LOCKS": RLIMIT_LOCKS, - "RLIMIT_SIGPENDING": RLIMIT_SIGPENDING, - "RLIMIT_MSGQUEUE": RLIMIT_MSGQUEUE, - "RLIMIT_NICE": RLIMIT_NICE, - "RLIMIT_RTPRIO": RLIMIT_RTPRIO, - "RLIMIT_RTTIME": RLIMIT_RTTIME, -} - -func strToRlimit(key string) (int, error) { - rl, ok := rlimitMap[key] - if !ok { - return 0, fmt.Errorf("wrong rlimit value: %s", key) - } - return rl, nil -} diff --git a/vendor/github.com/opencontainers/runc/run.go b/vendor/github.com/opencontainers/runc/run.go deleted file mode 100644 index ebec222ec..000000000 --- a/vendor/github.com/opencontainers/runc/run.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build linux - -package main - -import ( - "os" - - "github.com/urfave/cli" -) - -// default action is to start a container -var runCommand = cli.Command{ - Name: "run", - Usage: "create and run a container", - ArgsUsage: ` - -Where "" is your name for the instance of the container that you -are starting. The name you provide for the container instance must be unique on -your host.`, - Description: `The run command creates an instance of a container for a bundle. The bundle -is a directory with a specification file named "` + specConfig + `" and a root -filesystem. - -The specification file includes an args parameter. 
The args parameter is used -to specify command(s) that get run when the container is started. To change the -command(s) that get executed on start, edit the args parameter of the spec. See -"runc spec --help" for more explanation.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "bundle, b", - Value: "", - Usage: `path to the root of the bundle directory, defaults to the current directory`, - }, - cli.StringFlag{ - Name: "console-socket", - Value: "", - Usage: "path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal", - }, - cli.BoolFlag{ - Name: "detach, d", - Usage: "detach from the container's process", - }, - cli.StringFlag{ - Name: "pid-file", - Value: "", - Usage: "specify the file to write the process id to", - }, - cli.BoolFlag{ - Name: "no-subreaper", - Usage: "disable the use of the subreaper used to reap reparented processes", - }, - cli.BoolFlag{ - Name: "no-pivot", - Usage: "do not use pivot root to jail process inside rootfs. This should be used whenever the rootfs is on top of a ramdisk", - }, - cli.BoolFlag{ - Name: "no-new-keyring", - Usage: "do not create a new session keyring for the container. This will cause the container to inherit the calling processes session key", - }, - cli.IntFlag{ - Name: "preserve-fds", - Usage: "Pass N additional file descriptors to the container (stdio + $LISTEN_FDS + N in total)", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - if err := revisePidFile(context); err != nil { - return err - } - spec, err := setupSpec(context) - if err != nil { - return err - } - status, err := startContainer(context, spec, false) - if err == nil { - // exit with the container's exit status so any external supervisor is - // notified of the exit with the correct exit status. - os.Exit(status) - } - return err - }, -} diff --git a/vendor/github.com/opencontainers/runc/signals.go b/vendor/github.com/opencontainers/runc/signals.go deleted file mode 100644 index d787c940b..000000000 --- a/vendor/github.com/opencontainers/runc/signals.go +++ /dev/null @@ -1,135 +0,0 @@ -// +build linux - -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runc/libcontainer/utils" -) - -const signalBufferSize = 2048 - -// newSignalHandler returns a signal handler for processing SIGCHLD and SIGWINCH signals -// while still forwarding all other signals to the process. -// If notifySocket is present, use it to read systemd notifications from the container and -// forward them to notifySocketHost. -func newSignalHandler(enableSubreaper bool, notifySocket *notifySocket) *signalHandler { - if enableSubreaper { - // set us as the subreaper before registering the signal handler for the container - if err := system.SetSubreaper(1); err != nil { - logrus.Warn(err) - } - } - // ensure that we have a large buffer size so that we do not miss any signals - // incase we are not processing them fast enough. - s := make(chan os.Signal, signalBufferSize) - // handle all signals for the process. - signal.Notify(s) - return &signalHandler{ - signals: s, - notifySocket: notifySocket, - } -} - -// exit models a process exit status with the pid and -// exit status. 
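
`newSignalHandler` above subscribes to every signal on a deliberately large buffered channel so that bursts of SIGCHLD are queued by the runtime rather than dropped. A Unix-only sketch of that subscription pattern, self-signalling so it terminates on its own:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

const signalBufferSize = 2048 // same safety margin as above

func main() {
	// Buffered so that signals arriving faster than we drain them are
	// queued instead of silently discarded.
	s := make(chan os.Signal, signalBufferSize)
	signal.Notify(s) // no filter: receive everything, as the handler does

	// Send ourselves a signal so the demo is deterministic.
	if err := syscall.Kill(syscall.Getpid(), syscall.SIGUSR1); err != nil {
		panic(err)
	}

	for sig := range s {
		fmt.Println("got signal:", sig)
		if sig == syscall.SIGUSR1 {
			return
		}
	}
}
```
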
-type exit struct { - pid int - status int -} - -type signalHandler struct { - signals chan os.Signal - notifySocket *notifySocket -} - -// forward handles the main signal event loop forwarding, resizing, or reaping depending -// on the signal received. -func (h *signalHandler) forward(process *libcontainer.Process, tty *tty, detach bool) (int, error) { - // make sure we know the pid of our main process so that we can return - // after it dies. - if detach && h.notifySocket == nil { - return 0, nil - } - - pid1, err := process.Pid() - if err != nil { - return -1, err - } - - if h.notifySocket != nil { - if detach { - h.notifySocket.run(pid1) - return 0, nil - } else { - go h.notifySocket.run(0) - } - } - - // perform the initial tty resize. - tty.resize() - for s := range h.signals { - switch s { - case syscall.SIGWINCH: - tty.resize() - case syscall.SIGCHLD: - exits, err := h.reap() - if err != nil { - logrus.Error(err) - } - for _, e := range exits { - logrus.WithFields(logrus.Fields{ - "pid": e.pid, - "status": e.status, - }).Debug("process exited") - if e.pid == pid1 { - // call Wait() on the process even though we already have the exit - // status because we must ensure that any of the go specific process - // fun such as flushing pipes are complete before we return. - process.Wait() - if h.notifySocket != nil { - h.notifySocket.Close() - } - return e.status, nil - } - } - default: - logrus.Debugf("sending signal to process %s", s) - if err := syscall.Kill(pid1, s.(syscall.Signal)); err != nil { - logrus.Error(err) - } - } - } - return -1, nil -} - -// reap runs wait4 in a loop until we have finished processing any existing exits -// then returns all exits to the main event loop for further processing. -func (h *signalHandler) reap() (exits []exit, err error) { - var ( - ws syscall.WaitStatus - rus syscall.Rusage - ) - for { - pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus) - if err != nil { - if err == syscall.ECHILD { - return exits, nil - } - return nil, err - } - if pid <= 0 { - return exits, nil - } - exits = append(exits, exit{ - pid: pid, - status: utils.ExitStatus(ws), - }) - } -} diff --git a/vendor/github.com/opencontainers/runc/spec.go b/vendor/github.com/opencontainers/runc/spec.go deleted file mode 100644 index 9024ad4cd..000000000 --- a/vendor/github.com/opencontainers/runc/spec.go +++ /dev/null @@ -1,155 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "runtime" - - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/specconv" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -var specCommand = cli.Command{ - Name: "spec", - Usage: "create a new specification file", - ArgsUsage: "", - Description: `The spec command creates the new specification file named "` + specConfig + `" for -the bundle. - -The spec generated is just a starter file. Editing of the spec is required to -achieve desired results. For example, the newly generated spec includes an args -parameter that is initially set to call the "sh" command when the container is -started. Calling "sh" may work for an ubuntu container or busybox, but will not -work for containers that do not include the "sh" program. - -EXAMPLE: - To run docker's hello-world container one needs to set the args parameter -in the spec to call hello. This can be done using the sed command or a text -editor. 
The following commands create a bundle for hello-world, change the -default args parameter in the spec from "sh" to "/hello", then run the hello -command in a new hello-world container named container1: - - mkdir hello - cd hello - docker pull hello-world - docker export $(docker create hello-world) > hello-world.tar - mkdir rootfs - tar -C rootfs -xf hello-world.tar - runc spec - sed -i 's;"sh";"/hello";' ` + specConfig + ` - runc run container1 - -In the run command above, "container1" is the name for the instance of the -container that you are starting. The name you provide for the container instance -must be unique on your host. - -An alternative for generating a customized spec config is to use "oci-runtime-tool", the -sub-command "oci-runtime-tool generate" has lots of options that can be used to do any -customizations as you want, see [runtime-tools](https://github.com/opencontainers/runtime-tools) -to get more information. - -When starting a container through runc, runc needs root privilege. If not -already running as root, you can use sudo to give runc root privilege. For -example: "sudo runc start container1" will give runc root privilege to start the -container on your host.`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "bundle, b", - Value: "", - Usage: "path to the root of the bundle directory", - }, - cli.BoolFlag{ - Name: "rootless", - Usage: "generate a configuration for a rootless container", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 0, exactArgs); err != nil { - return err - } - spec := specconv.Example() - - rootless := context.Bool("rootless") - if rootless { - specconv.ToRootless(spec) - } - - checkNoFile := func(name string) error { - _, err := os.Stat(name) - if err == nil { - return fmt.Errorf("File %s exists. Remove it first", name) - } - if !os.IsNotExist(err) { - return err - } - return nil - } - bundle := context.String("bundle") - if bundle != "" { - if err := os.Chdir(bundle); err != nil { - return err - } - } - if err := checkNoFile(specConfig); err != nil { - return err - } - data, err := json.MarshalIndent(spec, "", "\t") - if err != nil { - return err - } - if err := ioutil.WriteFile(specConfig, data, 0666); err != nil { - return err - } - return nil - }, -} - -func sPtr(s string) *string { return &s } - -// loadSpec loads the specification from the provided path. 
-func loadSpec(cPath string) (spec *specs.Spec, err error) { - cf, err := os.Open(cPath) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("JSON specification file %s not found", cPath) - } - return nil, err - } - defer cf.Close() - - if err = json.NewDecoder(cf).Decode(&spec); err != nil { - return nil, err - } - if err = validatePlatform(&spec.Platform); err != nil { - return nil, err - } - return spec, validateProcessSpec(&spec.Process) -} - -func createLibContainerRlimit(rlimit specs.LinuxRlimit) (configs.Rlimit, error) { - rl, err := strToRlimit(rlimit.Type) - if err != nil { - return configs.Rlimit{}, err - } - return configs.Rlimit{ - Type: rl, - Hard: rlimit.Hard, - Soft: rlimit.Soft, - }, nil -} - -func validatePlatform(platform *specs.Platform) error { - if platform.OS != runtime.GOOS { - return fmt.Errorf("target os %s mismatch with current os %s", platform.OS, runtime.GOOS) - } - if platform.Arch != runtime.GOARCH { - return fmt.Errorf("target arch %s mismatch with current arch %s", platform.Arch, runtime.GOARCH) - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/start.go b/vendor/github.com/opencontainers/runc/start.go deleted file mode 100644 index 2bb698b20..000000000 --- a/vendor/github.com/opencontainers/runc/start.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "errors" - "fmt" - - "github.com/opencontainers/runc/libcontainer" - "github.com/urfave/cli" -) - -var startCommand = cli.Command{ - Name: "start", - Usage: "executes the user defined process in a created container", - ArgsUsage: ` - -Where "" is your name for the instance of the container that you -are starting. The name you provide for the container instance must be unique on -your host.`, - Description: `The start command executes the user defined process in a created container.`, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - container, err := getContainer(context) - if err != nil { - return err - } - status, err := container.Status() - if err != nil { - return err - } - switch status { - case libcontainer.Created: - return container.Exec() - case libcontainer.Stopped: - return errors.New("cannot start a container that has stopped") - case libcontainer.Running: - return errors.New("cannot start an already running container") - default: - return fmt.Errorf("cannot start a container in the %s state\n", status) - } - }, -} diff --git a/vendor/github.com/opencontainers/runc/state.go b/vendor/github.com/opencontainers/runc/state.go deleted file mode 100644 index 718813c36..000000000 --- a/vendor/github.com/opencontainers/runc/state.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "os" - - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/urfave/cli" -) - -var stateCommand = cli.Command{ - Name: "state", - Usage: "output the state of a container", - ArgsUsage: ` - -Where "" is your name for the instance of the container.`, - Description: `The state command outputs current state information for the -instance of a container.`, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - container, err := getContainer(context) - if err != nil { - return err - } - containerStatus, err := container.Status() - if err != nil { - return err - } - state, err := container.State() - if err != nil { - return err - } - pid := 
state.BaseState.InitProcessPid - if containerStatus == libcontainer.Stopped { - pid = 0 - } - bundle, annotations := utils.Annotations(state.Config.Labels) - cs := containerState{ - Version: state.BaseState.Config.Version, - ID: state.BaseState.ID, - InitProcessPid: pid, - Status: containerStatus.String(), - Bundle: bundle, - Rootfs: state.BaseState.Config.Rootfs, - Created: state.BaseState.Created, - Annotations: annotations, - } - data, err := json.MarshalIndent(cs, "", " ") - if err != nil { - return err - } - os.Stdout.Write(data) - return nil - }, -} diff --git a/vendor/github.com/opencontainers/runc/tty.go b/vendor/github.com/opencontainers/runc/tty.go deleted file mode 100644 index 9824df144..000000000 --- a/vendor/github.com/opencontainers/runc/tty.go +++ /dev/null @@ -1,134 +0,0 @@ -// +build linux - -package main - -import ( - "fmt" - "io" - "os" - "sync" - - "github.com/docker/docker/pkg/term" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/utils" -) - -type tty struct { - console libcontainer.Console - state *term.State - closers []io.Closer - postStart []io.Closer - wg sync.WaitGroup - consoleC chan error -} - -func (t *tty) copyIO(w io.Writer, r io.ReadCloser) { - defer t.wg.Done() - io.Copy(w, r) - r.Close() -} - -// setup pipes for the process so that advanced features like c/r are able to easily checkpoint -// and restore the process's IO without depending on a host specific path or device -func setupProcessPipes(p *libcontainer.Process, rootuid, rootgid int) (*tty, error) { - i, err := p.InitializeIO(rootuid, rootgid) - if err != nil { - return nil, err - } - t := &tty{ - closers: []io.Closer{ - i.Stdin, - i.Stdout, - i.Stderr, - }, - } - // add the process's io to the post start closers if they support close - for _, cc := range []interface{}{ - p.Stdin, - p.Stdout, - p.Stderr, - } { - if c, ok := cc.(io.Closer); ok { - t.postStart = append(t.postStart, c) - } - } - go func() { - io.Copy(i.Stdin, os.Stdin) - i.Stdin.Close() - }() - t.wg.Add(2) - go t.copyIO(os.Stdout, i.Stdout) - go t.copyIO(os.Stderr, i.Stderr) - return t, nil -} - -func inheritStdio(process *libcontainer.Process) error { - process.Stdin = os.Stdin - process.Stdout = os.Stdout - process.Stderr = os.Stderr - return nil -} - -func (t *tty) recvtty(process *libcontainer.Process, socket *os.File) error { - f, err := utils.RecvFd(socket) - if err != nil { - return err - } - console := libcontainer.ConsoleFromFile(f) - go io.Copy(console, os.Stdin) - t.wg.Add(1) - go t.copyIO(os.Stdout, console) - state, err := term.SetRawTerminal(os.Stdin.Fd()) - if err != nil { - return fmt.Errorf("failed to set the terminal from the stdin: %v", err) - } - t.state = state - t.console = console - t.closers = []io.Closer{console} - return nil -} - -func (t *tty) waitConsole() error { - if t.consoleC != nil { - return <-t.consoleC - } - return nil -} - -// ClosePostStart closes any fds that are provided to the container and dup2'd -// so that we no longer have copy in our process. 
-func (t *tty) ClosePostStart() error { - for _, c := range t.postStart { - c.Close() - } - return nil -} - -// Close closes all open fds for the tty and/or restores the orignal -// stdin state to what it was prior to the container execution -func (t *tty) Close() error { - // ensure that our side of the fds are always closed - for _, c := range t.postStart { - c.Close() - } - // wait for the copy routines to finish before closing the fds - t.wg.Wait() - for _, c := range t.closers { - c.Close() - } - if t.state != nil { - term.RestoreTerminal(os.Stdin.Fd(), t.state) - } - return nil -} - -func (t *tty) resize() error { - if t.console == nil { - return nil - } - ws, err := term.GetWinsize(os.Stdin.Fd()) - if err != nil { - return err - } - return term.SetWinsize(t.console.File().Fd(), ws) -} diff --git a/vendor/github.com/opencontainers/runc/update.go b/vendor/github.com/opencontainers/runc/update.go deleted file mode 100644 index 7af3a9cdf..000000000 --- a/vendor/github.com/opencontainers/runc/update.go +++ /dev/null @@ -1,250 +0,0 @@ -// +build linux - -package main - -import ( - "encoding/json" - "fmt" - "os" - "strconv" - - "github.com/docker/go-units" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -func i64Ptr(i int64) *int64 { return &i } -func u64Ptr(i uint64) *uint64 { return &i } -func u16Ptr(i uint16) *uint16 { return &i } - -var updateCommand = cli.Command{ - Name: "update", - Usage: "update container resource constraints", - ArgsUsage: ``, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "resources, r", - Value: "", - Usage: `path to the file containing the resources to update or '-' to read from the standard input - -The accepted format is as follow (unchanged values can be omitted): - -{ - "memory": { - "limit": 0, - "reservation": 0, - "swap": 0, - "kernel": 0, - "kernelTCP": 0 - }, - "cpu": { - "shares": 0, - "quota": 0, - "period": 0, - "realtimeRuntime": 0, - "realtimePeriod": 0, - "cpus": "", - "mems": "" - }, - "blockIO": { - "blkioWeight": 0 - } -} - -Note: if data is to be read from a file or the standard input, all -other options are ignored. -`, - }, - - cli.IntFlag{ - Name: "blkio-weight", - Usage: "Specifies per cgroup weight, range is from 10 to 1000", - }, - cli.StringFlag{ - Name: "cpu-period", - Usage: "CPU CFS period to be used for hardcapping (in usecs). 0 to use system default", - }, - cli.StringFlag{ - Name: "cpu-quota", - Usage: "CPU CFS hardcap limit (in usecs). Allowed cpu time in a given period", - }, - cli.StringFlag{ - Name: "cpu-share", - Usage: "CPU shares (relative weight vs. other containers)", - }, - cli.StringFlag{ - Name: "cpu-rt-period", - Usage: "CPU realtime period to be used for hardcapping (in usecs). 0 to use system default", - }, - cli.StringFlag{ - Name: "cpu-rt-runtime", - Usage: "CPU realtime hardcap limit (in usecs). 
Allowed cpu time in a given period", - }, - cli.StringFlag{ - Name: "cpuset-cpus", - Usage: "CPU(s) to use", - }, - cli.StringFlag{ - Name: "cpuset-mems", - Usage: "Memory node(s) to use", - }, - cli.StringFlag{ - Name: "kernel-memory", - Usage: "Kernel memory limit (in bytes)", - }, - cli.StringFlag{ - Name: "kernel-memory-tcp", - Usage: "Kernel memory limit (in bytes) for tcp buffer", - }, - cli.StringFlag{ - Name: "memory", - Usage: "Memory limit (in bytes)", - }, - cli.StringFlag{ - Name: "memory-reservation", - Usage: "Memory reservation or soft_limit (in bytes)", - }, - cli.StringFlag{ - Name: "memory-swap", - Usage: "Total memory usage (memory + swap); set '-1' to enable unlimited swap", - }, - }, - Action: func(context *cli.Context) error { - if err := checkArgs(context, 1, exactArgs); err != nil { - return err - } - container, err := getContainer(context) - if err != nil { - return err - } - - r := specs.LinuxResources{ - Memory: &specs.LinuxMemory{ - Limit: u64Ptr(0), - Reservation: u64Ptr(0), - Swap: u64Ptr(0), - Kernel: u64Ptr(0), - KernelTCP: u64Ptr(0), - }, - CPU: &specs.LinuxCPU{ - Shares: u64Ptr(0), - Quota: i64Ptr(0), - Period: u64Ptr(0), - RealtimeRuntime: i64Ptr(0), - RealtimePeriod: u64Ptr(0), - Cpus: "", - Mems: "", - }, - BlockIO: &specs.LinuxBlockIO{ - Weight: u16Ptr(0), - }, - } - - config := container.Config() - - if in := context.String("resources"); in != "" { - var ( - f *os.File - err error - ) - switch in { - case "-": - f = os.Stdin - default: - f, err = os.Open(in) - if err != nil { - return err - } - } - err = json.NewDecoder(f).Decode(&r) - if err != nil { - return err - } - } else { - if val := context.Int("blkio-weight"); val != 0 { - r.BlockIO.Weight = u16Ptr(uint16(val)) - } - if val := context.String("cpuset-cpus"); val != "" { - r.CPU.Cpus = val - } - if val := context.String("cpuset-mems"); val != "" { - r.CPU.Mems = val - } - - for _, pair := range []struct { - opt string - dest *uint64 - }{ - - {"cpu-period", r.CPU.Period}, - {"cpu-rt-period", r.CPU.RealtimePeriod}, - {"cpu-share", r.CPU.Shares}, - } { - if val := context.String(pair.opt); val != "" { - var err error - *pair.dest, err = strconv.ParseUint(val, 10, 64) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", pair.opt, err) - } - } - } - for _, pair := range []struct { - opt string - dest *int64 - }{ - - {"cpu-quota", r.CPU.Quota}, - {"cpu-rt-runtime", r.CPU.RealtimeRuntime}, - } { - if val := context.String(pair.opt); val != "" { - var err error - *pair.dest, err = strconv.ParseInt(val, 10, 64) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", pair.opt, err) - } - } - } - for _, pair := range []struct { - opt string - dest *uint64 - }{ - {"memory", r.Memory.Limit}, - {"memory-swap", r.Memory.Swap}, - {"kernel-memory", r.Memory.Kernel}, - {"kernel-memory-tcp", r.Memory.KernelTCP}, - {"memory-reservation", r.Memory.Reservation}, - } { - if val := context.String(pair.opt); val != "" { - var v int64 - - if val != "-1" { - v, err = units.RAMInBytes(val) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", pair.opt, err) - } - } else { - v = -1 - } - *pair.dest = uint64(v) - } - } - } - - // Update the value - config.Cgroups.Resources.BlkioWeight = *r.BlockIO.Weight - config.Cgroups.Resources.CpuPeriod = *r.CPU.Period - config.Cgroups.Resources.CpuQuota = *r.CPU.Quota - config.Cgroups.Resources.CpuShares = *r.CPU.Shares - config.Cgroups.Resources.CpuRtPeriod = *r.CPU.RealtimePeriod - config.Cgroups.Resources.CpuRtRuntime = *r.CPU.RealtimeRuntime - 
config.Cgroups.Resources.CpusetCpus = r.CPU.Cpus - config.Cgroups.Resources.CpusetMems = r.CPU.Mems - config.Cgroups.Resources.KernelMemory = *r.Memory.Kernel - config.Cgroups.Resources.KernelMemoryTCP = *r.Memory.KernelTCP - config.Cgroups.Resources.Memory = *r.Memory.Limit - config.Cgroups.Resources.MemoryReservation = *r.Memory.Reservation - config.Cgroups.Resources.MemorySwap = *r.Memory.Swap - - return container.Set(config) - }, -} diff --git a/vendor/github.com/opencontainers/runc/utils.go b/vendor/github.com/opencontainers/runc/utils.go deleted file mode 100644 index 98f93a4cf..000000000 --- a/vendor/github.com/opencontainers/runc/utils.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -const ( - exactArgs = iota - minArgs - maxArgs -) - -func checkArgs(context *cli.Context, expected, checkType int) error { - var err error - cmdName := context.Command.Name - switch checkType { - case exactArgs: - if context.NArg() != expected { - err = fmt.Errorf("%s: %q requires exactly %d argument(s)", os.Args[0], cmdName, expected) - } - case minArgs: - if context.NArg() < expected { - err = fmt.Errorf("%s: %q requires a minimum of %d argument(s)", os.Args[0], cmdName, expected) - } - case maxArgs: - if context.NArg() > expected { - err = fmt.Errorf("%s: %q requires a maximum of %d argument(s)", os.Args[0], cmdName, expected) - } - } - - if err != nil { - fmt.Printf("Incorrect Usage.\n\n") - cli.ShowCommandHelp(context, cmdName) - return err - } - return nil -} - -// fatal prints the error's details if it is a libcontainer specific error type -// then exits the program with an exit status of 1. -func fatal(err error) { - // make sure the error is written to the logger - logrus.Error(err) - fmt.Fprintln(os.Stderr, err) - os.Exit(1) -} - -// setupSpec performs initial setup based on the cli.Context for the container -func setupSpec(context *cli.Context) (*specs.Spec, error) { - bundle := context.String("bundle") - if bundle != "" { - if err := os.Chdir(bundle); err != nil { - return nil, err - } - } - spec, err := loadSpec(specConfig) - if err != nil { - return nil, err - } - return spec, nil -} - -func revisePidFile(context *cli.Context) error { - pidFile := context.String("pid-file") - if pidFile == "" { - return nil - } - - // convert pid-file to an absolute path so we can write to the right - // file after chdir to bundle - pidFile, err := filepath.Abs(pidFile) - if err != nil { - return err - } - return context.Set("pid-file", pidFile) -} diff --git a/vendor/github.com/opencontainers/runc/utils_linux.go b/vendor/github.com/opencontainers/runc/utils_linux.go deleted file mode 100644 index c6a8c028e..000000000 --- a/vendor/github.com/opencontainers/runc/utils_linux.go +++ /dev/null @@ -1,378 +0,0 @@ -// +build linux - -package main - -import ( - "errors" - "fmt" - "net" - "os" - "path/filepath" - "strconv" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/coreos/go-systemd/activation" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/cgroups/systemd" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/specconv" - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/urfave/cli" -) - -var errEmptyID = errors.New("container id cannot be empty") - -var container 
libcontainer.Container - -// loadFactory returns the configured factory instance for execing containers. -func loadFactory(context *cli.Context) (libcontainer.Factory, error) { - root := context.GlobalString("root") - abs, err := filepath.Abs(root) - if err != nil { - return nil, err - } - cgroupManager := libcontainer.Cgroupfs - if context.GlobalBool("systemd-cgroup") { - if systemd.UseSystemd() { - cgroupManager = libcontainer.SystemdCgroups - } else { - return nil, fmt.Errorf("systemd cgroup flag passed, but systemd support for managing cgroups is not available") - } - } - return libcontainer.New(abs, cgroupManager, libcontainer.CriuPath(context.GlobalString("criu"))) -} - -// getContainer returns the specified container instance by loading it from state -// with the default factory. -func getContainer(context *cli.Context) (libcontainer.Container, error) { - id := context.Args().First() - if id == "" { - return nil, errEmptyID - } - factory, err := loadFactory(context) - if err != nil { - return nil, err - } - return factory.Load(id) -} - -func fatalf(t string, v ...interface{}) { - fatal(fmt.Errorf(t, v...)) -} - -func getDefaultImagePath(context *cli.Context) string { - cwd, err := os.Getwd() - if err != nil { - panic(err) - } - return filepath.Join(cwd, "checkpoint") -} - -// newProcess returns a new libcontainer Process with the arguments from the -// spec and stdio from the current process. -func newProcess(p specs.Process) (*libcontainer.Process, error) { - lp := &libcontainer.Process{ - Args: p.Args, - Env: p.Env, - // TODO: fix libcontainer's API to better support uid/gid in a typesafe way. - User: fmt.Sprintf("%d:%d", p.User.UID, p.User.GID), - Cwd: p.Cwd, - Label: p.SelinuxLabel, - NoNewPrivileges: &p.NoNewPrivileges, - AppArmorProfile: p.ApparmorProfile, - } - if p.Capabilities != nil { - lp.Capabilities = &configs.Capabilities{} - lp.Capabilities.Bounding = p.Capabilities.Bounding - lp.Capabilities.Effective = p.Capabilities.Effective - lp.Capabilities.Inheritable = p.Capabilities.Inheritable - lp.Capabilities.Permitted = p.Capabilities.Permitted - lp.Capabilities.Ambient = p.Capabilities.Ambient - } - for _, gid := range p.User.AdditionalGids { - lp.AdditionalGroups = append(lp.AdditionalGroups, strconv.FormatUint(uint64(gid), 10)) - } - for _, rlimit := range p.Rlimits { - rl, err := createLibContainerRlimit(rlimit) - if err != nil { - return nil, err - } - lp.Rlimits = append(lp.Rlimits, rl) - } - return lp, nil -} - -func destroy(container libcontainer.Container) { - if err := container.Destroy(); err != nil { - logrus.Error(err) - } -} - -// setupIO modifies the given process config according to the options. 
-func setupIO(process *libcontainer.Process, rootuid, rootgid int, createTTY, detach bool, sockpath string) (*tty, error) { - if createTTY { - process.Stdin = nil - process.Stdout = nil - process.Stderr = nil - t := &tty{} - if !detach { - parent, child, err := utils.NewSockPair("console") - if err != nil { - return nil, err - } - process.ConsoleSocket = child - t.postStart = append(t.postStart, parent, child) - t.consoleC = make(chan error, 1) - go func() { - if err := t.recvtty(process, parent); err != nil { - t.consoleC <- err - } - t.consoleC <- nil - }() - } else { - // the caller of runc will handle receiving the console master - conn, err := net.Dial("unix", sockpath) - if err != nil { - return nil, err - } - uc, ok := conn.(*net.UnixConn) - if !ok { - return nil, fmt.Errorf("casting to UnixConn failed") - } - t.postStart = append(t.postStart, uc) - socket, err := uc.File() - if err != nil { - return nil, err - } - t.postStart = append(t.postStart, socket) - process.ConsoleSocket = socket - } - return t, nil - } - // when runc will detach the caller provides the stdio to runc via runc's 0,1,2 - // and the container's process inherits runc's stdio. - if detach { - if err := inheritStdio(process); err != nil { - return nil, err - } - return &tty{}, nil - } - return setupProcessPipes(process, rootuid, rootgid) -} - -// createPidFile creates a file with the processes pid inside it atomically -// it creates a temp file with the paths filename + '.' infront of it -// then renames the file -func createPidFile(path string, process *libcontainer.Process) error { - pid, err := process.Pid() - if err != nil { - return err - } - var ( - tmpDir = filepath.Dir(path) - tmpName = filepath.Join(tmpDir, fmt.Sprintf(".%s", filepath.Base(path))) - ) - f, err := os.OpenFile(tmpName, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666) - if err != nil { - return err - } - _, err = fmt.Fprintf(f, "%d", pid) - f.Close() - if err != nil { - return err - } - return os.Rename(tmpName, path) -} - -// XXX: Currently we autodetect rootless mode. -func isRootless() bool { - return os.Geteuid() != 0 -} - -func createContainer(context *cli.Context, id string, spec *specs.Spec) (libcontainer.Container, error) { - config, err := specconv.CreateLibcontainerConfig(&specconv.CreateOpts{ - CgroupName: id, - UseSystemdCgroup: context.GlobalBool("systemd-cgroup"), - NoPivotRoot: context.Bool("no-pivot"), - NoNewKeyring: context.Bool("no-new-keyring"), - Spec: spec, - Rootless: isRootless(), - }) - if err != nil { - return nil, err - } - - factory, err := loadFactory(context) - if err != nil { - return nil, err - } - return factory.Create(id, config) -} - -type runner struct { - enableSubreaper bool - shouldDestroy bool - detach bool - listenFDs []*os.File - preserveFDs int - pidFile string - consoleSocket string - container libcontainer.Container - create bool - notifySocket *notifySocket -} - -func (r *runner) run(config *specs.Process) (int, error) { - if err := r.checkTerminal(config); err != nil { - r.destroy() - return -1, err - } - process, err := newProcess(*config) - if err != nil { - r.destroy() - return -1, err - } - if len(r.listenFDs) > 0 { - process.Env = append(process.Env, fmt.Sprintf("LISTEN_FDS=%d", len(r.listenFDs)), "LISTEN_PID=1") - process.ExtraFiles = append(process.ExtraFiles, r.listenFDs...) 
- } - baseFd := 3 + len(process.ExtraFiles) - for i := baseFd; i < baseFd+r.preserveFDs; i++ { - process.ExtraFiles = append(process.ExtraFiles, os.NewFile(uintptr(i), "PreserveFD:"+strconv.Itoa(i))) - } - rootuid, err := r.container.Config().HostRootUID() - if err != nil { - r.destroy() - return -1, err - } - rootgid, err := r.container.Config().HostRootGID() - if err != nil { - r.destroy() - return -1, err - } - var ( - detach = r.detach || r.create - startFn = r.container.Start - ) - if !r.create { - startFn = r.container.Run - } - // Setting up IO is a two stage process. We need to modify process to deal - // with detaching containers, and then we get a tty after the container has - // started. - handler := newSignalHandler(r.enableSubreaper, r.notifySocket) - tty, err := setupIO(process, rootuid, rootgid, config.Terminal, detach, r.consoleSocket) - if err != nil { - r.destroy() - return -1, err - } - defer tty.Close() - if err = startFn(process); err != nil { - r.destroy() - return -1, err - } - if err := tty.waitConsole(); err != nil { - r.terminate(process) - r.destroy() - return -1, err - } - if err = tty.ClosePostStart(); err != nil { - r.terminate(process) - r.destroy() - return -1, err - } - if r.pidFile != "" { - if err = createPidFile(r.pidFile, process); err != nil { - r.terminate(process) - r.destroy() - return -1, err - } - } - status, err := handler.forward(process, tty, detach) - if err != nil { - r.terminate(process) - } - if detach { - return 0, nil - } - r.destroy() - return status, err -} - -func (r *runner) destroy() { - if r.shouldDestroy { - destroy(r.container) - } -} - -func (r *runner) terminate(p *libcontainer.Process) { - _ = p.Signal(syscall.SIGKILL) - _, _ = p.Wait() -} - -func (r *runner) checkTerminal(config *specs.Process) error { - detach := r.detach || r.create - // Check command-line for sanity. - if detach && config.Terminal && r.consoleSocket == "" { - return fmt.Errorf("cannot allocate tty if runc will detach without setting console socket") - } - if (!detach || !config.Terminal) && r.consoleSocket != "" { - return fmt.Errorf("cannot use console socket if runc will not detach or allocate tty") - } - return nil -} - -func validateProcessSpec(spec *specs.Process) error { - if spec.Cwd == "" { - return fmt.Errorf("Cwd property must not be empty") - } - if !filepath.IsAbs(spec.Cwd) { - return fmt.Errorf("Cwd must be an absolute path") - } - if len(spec.Args) == 0 { - return fmt.Errorf("args must not be empty") - } - return nil -} - -func startContainer(context *cli.Context, spec *specs.Spec, create bool) (int, error) { - id := context.Args().First() - if id == "" { - return -1, errEmptyID - } - - notifySocket := newNotifySocket(context, os.Getenv("NOTIFY_SOCKET"), id) - if notifySocket != nil { - notifySocket.setupSpec(context, spec) - } - - container, err := createContainer(context, id, spec) - if err != nil { - return -1, err - } - - if notifySocket != nil { - notifySocket.setupSocket() - } - - // Support on-demand socket activation by passing file descriptors into the container init process. 
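The socket-activation comment above (the hunk continues just below) refers to the systemd LISTEN_FDS protocol: when the environment variable is set, runc collects the activated descriptors via the vendored go-systemd activation package and hands them to the container process as ExtraFiles, alongside LISTEN_FDS and LISTEN_PID=1, as the run method earlier showed. A tiny sketch of the receiving side, using only the activation call that appears in this file:

    package main

    import (
        "fmt"
        "os"

        "github.com/coreos/go-systemd/activation"
    )

    func main() {
        if os.Getenv("LISTEN_FDS") == "" {
            fmt.Println("not socket-activated")
            return
        }
        // Files(false) returns one *os.File per descriptor passed by the
        // service manager, starting at fd 3; false leaves the env vars intact.
        files := activation.Files(false)
        fmt.Printf("received %d activated file(s)\n", len(files))
    }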
- listenFDs := []*os.File{} - if os.Getenv("LISTEN_FDS") != "" { - listenFDs = activation.Files(false) - } - r := &runner{ - enableSubreaper: !context.Bool("no-subreaper"), - shouldDestroy: true, - container: container, - listenFDs: listenFDs, - notifySocket: notifySocket, - consoleSocket: context.String("console-socket"), - detach: context.Bool("detach"), - pidFile: context.String("pid-file"), - preserveFDs: context.Int("preserve-fds"), - create: create, - } - return r.run(&spec.Process) -} diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go index 64cc40fe1..003e99fad 100644 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -559,10 +559,14 @@ type UnifiedDiff struct { func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() - w := func(format string, args ...interface{}) error { + wf := func(format string, args ...interface{}) error { _, err := buf.WriteString(fmt.Sprintf(format, args...)) return err } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } if len(diff.Eol) == 0 { diff.Eol = "\n" @@ -581,26 +585,28 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } } } first, last := g[0], g[len(g)-1] range1 := formatRangeUnified(first.I1, last.I2) range2 := formatRangeUnified(first.J1, last.J2) - if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { return err } for _, c := range g { i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 if c.Tag == 'e' { for _, line := range diff.A[i1:i2] { - if err := w(" " + line); err != nil { + if err := ws(" " + line); err != nil { return err } } @@ -608,14 +614,14 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { } if c.Tag == 'r' || c.Tag == 'd' { for _, line := range diff.A[i1:i2] { - if err := w("-" + line); err != nil { + if err := ws("-" + line); err != nil { return err } } } if c.Tag == 'r' || c.Tag == 'i' { for _, line := range diff.B[j1:j2] { - if err := w("+" + line); err != nil { + if err := ws("+" + line); err != nil { return err } } @@ -669,12 +675,18 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() var diffErr error - w := func(format string, args ...interface{}) { + wf := func(format string, args ...interface{}) { _, err := buf.WriteString(fmt.Sprintf(format, args...)) if diffErr == nil && err != nil { diffErr = err } } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } if len(diff.Eol) == 0 { diff.Eol = "\n" @@ -700,15 +712,17 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - w("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + if diff.FromFile != "" || diff.ToFile != "" { + 
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } } first, last := g[0], g[len(g)-1] - w("***************" + diff.Eol) + ws("***************" + diff.Eol) range1 := formatRangeContext(first.I1, last.I2) - w("*** %s ****%s", range1, diff.Eol) + wf("*** %s ****%s", range1, diff.Eol) for _, c := range g { if c.Tag == 'r' || c.Tag == 'd' { for _, cc := range g { @@ -716,7 +730,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { continue } for _, line := range diff.A[cc.I1:cc.I2] { - w(prefix[cc.Tag] + line) + ws(prefix[cc.Tag] + line) } } break @@ -724,7 +738,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { } range2 := formatRangeContext(first.J1, last.J2) - w("--- %s ----%s", range2, diff.Eol) + wf("--- %s ----%s", range2, diff.Eol) for _, c := range g { if c.Tag == 'r' || c.Tag == 'i' { for _, cc := range g { @@ -732,7 +746,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { continue } for _, line := range diff.B[cc.J1:cc.J2] { - w(prefix[cc.Tag] + line) + ws(prefix[cc.Tag] + line) } } break diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE new file mode 100644 index 000000000..11069edd7 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
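Back in the go-difflib hunk above, the single write closure w was split into wf (formatted writes) and ws (plain strings), and the ---/+++ (or ***/---) file header is now emitted only when at least one of FromFile and ToFile is set. A short usage sketch against the package's public API; the file names and inputs here are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/pmezard/go-difflib/difflib"
    )

    func main() {
        diff := difflib.UnifiedDiff{
            A:        difflib.SplitLines("one\ntwo\nthree\n"),
            B:        difflib.SplitLines("one\ntwo\nfour\n"),
            FromFile: "a.txt", // with the patch above, leaving both names empty omits the header
            ToFile:   "b.txt",
            Context:  1,
        }
        text, err := difflib.GetUnifiedDiffString(diff)
        if err != nil {
            panic(err)
        }
        fmt.Print(text)
    }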
diff --git a/vendor/github.com/rancher/go-rancher-metadata/main.go b/vendor/github.com/rancher/go-rancher-metadata/main.go deleted file mode 100644 index ab4db3fd9..000000000 --- a/vendor/github.com/rancher/go-rancher-metadata/main.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "time" - - "github.com/sirupsen/logrus" - "github.com/rancher/go-rancher-metadata/metadata" -) - -const ( - metadataUrl = "http://rancher-metadata/2015-12-19" -) - -func main() { - m := metadata.NewClient(metadataUrl) - - version := "init" - - for { - newVersion, err := m.GetVersion() - if err != nil { - logrus.Errorf("Error reading metadata version: %v", err) - } else if version == newVersion { - logrus.Debug("No changes in metadata version") - } else { - logrus.Debugf("Metadata version has changed, oldVersion=[%s], newVersion=[%s]", version, newVersion) - version = newVersion - } - time.Sleep(5 * time.Second) - } -} diff --git a/vendor/github.com/rancher/go-rancher/main.go b/vendor/github.com/rancher/go-rancher/main.go deleted file mode 100644 index e8274c8a1..000000000 --- a/vendor/github.com/rancher/go-rancher/main.go +++ /dev/null @@ -1,10 +0,0 @@ -package main - -import ( - "fmt" - _ "github.com/rancher/go-rancher/client" -) - -func main() { - fmt.Println("I have nothing to do...") -} diff --git a/vendor/github.com/stretchr/objx/LICENSE.md b/vendor/github.com/stretchr/objx/LICENSE similarity index 94% rename from vendor/github.com/stretchr/objx/LICENSE.md rename to vendor/github.com/stretchr/objx/LICENSE index 219994581..44d4d9d5a 100644 --- a/vendor/github.com/stretchr/objx/LICENSE.md +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -1,8 +1,7 @@ -objx - by Mat Ryer and Tyler Bunnell - -The MIT License (MIT) +The MIT License Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go index 721bcac79..d95be0ca9 100644 --- a/vendor/github.com/stretchr/objx/accessors.go +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -77,13 +77,10 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{ index := -1 var err error - // https://github.com/stretchr/objx/issues/12 if strings.Contains(thisSel, "[") { - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) if len(arrayMatches) > 0 { - // Get the key into the map thisSel = arrayMatches[1] @@ -95,7 +92,6 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{ // seriously wrong. Panic. panic("objx: Array index is not an integer. 
Must use array[int].") } - } } @@ -110,9 +106,8 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{ if len(selSegs) <= 1 && isSet { curMSI[thisSel] = value return nil - } else { - current = curMSI[thisSel] } + current = curMSI[thisSel] default: current = nil } @@ -140,9 +135,7 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{ } } - return current - } // intFromInterface converts an interface object to the largest @@ -174,6 +167,5 @@ func intFromInterface(selector interface{}) int { default: panic("objx: array access argument is not an integer type (this should never happen)") } - return value } diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go index 9cdfa9f9f..5e020f310 100644 --- a/vendor/github.com/stretchr/objx/conversions.go +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -12,15 +12,11 @@ import ( // JSON converts the contained object to a JSON string // representation func (m Map) JSON() (string, error) { - result, err := json.Marshal(m) - if err != nil { err = errors.New("objx: JSON encode failed with: " + err.Error()) } - return string(result), err - } // MustJSON converts the contained object to a JSON string @@ -36,7 +32,6 @@ func (m Map) MustJSON() string { // Base64 converts the contained object to a Base64 string // representation of the JSON string representation func (m Map) Base64() (string, error) { - var buf bytes.Buffer jsonData, err := m.JSON() @@ -45,11 +40,13 @@ func (m Map) Base64() (string, error) { } encoder := base64.NewEncoder(base64.StdEncoding, &buf) - encoder.Write([]byte(jsonData)) - encoder.Close() + _, err = encoder.Write([]byte(jsonData)) + if err != nil { + return "", err + } + _ = encoder.Close() return buf.String(), nil - } // MustBase64 converts the contained object to a Base64 string @@ -67,16 +64,13 @@ func (m Map) MustBase64() string { // representation of the JSON string representation and signs it // using the provided key. func (m Map) SignedBase64(key string) (string, error) { - base64, err := m.Base64() if err != nil { return "", err } sig := HashWithKey(base64, key) - return base64 + SignatureSeparator + sig, nil - } // MustSignedBase64 converts the contained object to a Base64 string @@ -98,14 +92,11 @@ func (m Map) MustSignedBase64(key string) string { // URLValues creates a url.Values object from an Obj. This // function requires that the wrapped object be a map[string]interface{} func (m Map) URLValues() url.Values { - vals := make(url.Values) - for k, v := range m { //TODO: can this be done without sprintf? vals.Set(k, fmt.Sprintf("%v", v)) } - return vals } diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go index 47bf85e46..6d6af1a83 100644 --- a/vendor/github.com/stretchr/objx/doc.go +++ b/vendor/github.com/stretchr/objx/doc.go @@ -1,72 +1,66 @@ -// objx - Go package for dealing with maps, slices, JSON and other data. -// -// Overview -// -// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -// a powerful `Get` method (among others) that allows you to easily and quickly get -// access to data within the map, without having to worry too much about type assertions, -// missing data, default values etc. -// -// Pattern -// -// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s -// easy. 
-// -// Call one of the `objx.` functions to create your `objx.Map` to get going: -// -// m, err := objx.FromJSON(json) -// -// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -// the rest will be optimistic and try to figure things out without panicking. -// -// Use `Get` to access the value you're interested in. You can use dot and array -// notation too: -// -// m.Get("places[0].latlng") -// -// Once you have saught the `Value` you're interested in, you can use the `Is*` methods -// to determine its type. -// -// if m.Get("code").IsStr() { /* ... */ } -// -// Or you can just assume the type, and use one of the strong type methods to -// extract the real value: -// -// m.Get("code").Int() -// -// If there's no value there (or if it's the wrong type) then a default value -// will be returned, or you can be explicit about the default value. -// -// Get("code").Int(-1) -// -// If you're dealing with a slice of data as a value, Objx provides many useful -// methods for iterating, manipulating and selecting that data. You can find out more -// by exploring the index below. -// -// Reading data -// -// A simple example of how to use Objx: -// -// // use MustFromJSON to make an objx.Map from some JSON -// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) -// -// // get the details -// name := m.Get("name").Str() -// age := m.Get("age").Int() -// -// // get their nickname (or use their name if they -// // don't have one) -// nickname := m.Get("nickname").Str(name) -// -// Ranging -// -// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For -// example, to `range` the data, do what you would expect: -// -// m := objx.MustFromJSON(json) -// for key, value := range m { -// -// /* ... do your magic ... */ -// -// } +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a predictable pattern to make accessing data from within `map[string]interface{}` easy. +Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below.
+ +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } +*/ package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go index eb6ed8e28..7e9389a20 100644 --- a/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/objx/map.go @@ -27,7 +27,7 @@ func (m Map) Value() *Value { } // Nil represents a nil Map. -var Nil Map = New(nil) +var Nil = New(nil) // New creates a new Map containing the map[string]interface{} in the data argument. // If the data argument is not a map[string]interface, New attempts to call the @@ -49,7 +49,7 @@ func New(data interface{}) Map { // // Panics // -// Panics if any key arugment is non-string or if there are an odd number of arguments. +// Panics if any key argument is non-string or if there are an odd number of arguments. // // Example // @@ -60,16 +60,13 @@ func New(data interface{}) Map { // // creates an Map equivalent to // m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) func MSI(keyAndValuePairs ...interface{}) Map { - newMap := make(map[string]interface{}) keyAndValuePairsLen := len(keyAndValuePairs) - if keyAndValuePairsLen%2 != 0 { panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") } for i := 0; i < keyAndValuePairsLen; i = i + 2 { - key := keyAndValuePairs[i] value := keyAndValuePairs[i+1] @@ -78,11 +75,8 @@ func MSI(keyAndValuePairs ...interface{}) Map { if !keyStringOK { panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") } - newMap[keyString] = value - } - return New(newMap) } @@ -94,11 +88,9 @@ func MSI(keyAndValuePairs ...interface{}) Map { // Panics if the JSON is invalid. func MustFromJSON(jsonString string) Map { o, err := FromJSON(jsonString) - if err != nil { panic("objx: MustFromJSON failed with error: " + err.Error()) } - return o } @@ -107,16 +99,12 @@ func MustFromJSON(jsonString string) Map { // // Returns an error if the JSON is invalid. 
func FromJSON(jsonString string) (Map, error) { - var data interface{} err := json.Unmarshal([]byte(jsonString), &data) - if err != nil { return Nil, err } - return New(data), nil - } // FromBase64 creates a new Obj containing the data specified @@ -124,14 +112,11 @@ func FromJSON(jsonString string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func FromBase64(base64String string) (Map, error) { - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - decoded, err := ioutil.ReadAll(decoder) if err != nil { return nil, err } - return FromJSON(string(decoded)) } @@ -140,13 +125,10 @@ func FromBase64(base64String string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func MustFromBase64(base64String string) Map { - result, err := FromBase64(base64String) - if err != nil { panic("objx: MustFromBase64 failed with error: " + err.Error()) } - return result } @@ -157,14 +139,13 @@ func MustFromBase64(base64String string) Map { func FromSignedBase64(base64String, key string) (Map, error) { parts := strings.Split(base64String, SignatureSeparator) if len(parts) != 2 { - return nil, errors.New("objx: Signed base64 string is malformed.") + return nil, errors.New("objx: Signed base64 string is malformed") } sig := HashWithKey(parts[0], key) if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match.") + return nil, errors.New("objx: Signature for base64 data does not match") } - return FromBase64(parts[0]) } @@ -173,13 +154,10 @@ func FromSignedBase64(base64String, key string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func MustFromSignedBase64(base64String, key string) Map { - result, err := FromSignedBase64(base64String, key) - if err != nil { panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) } - return result } @@ -188,9 +166,7 @@ func MustFromSignedBase64(base64String, key string) Map { // // For queries with multiple values, the first value is selected. func FromURLQuery(query string) (Map, error) { - vals, err := url.ParseQuery(query) - if err != nil { return nil, err } @@ -199,7 +175,6 @@ func FromURLQuery(query string) (Map, error) { for k, vals := range vals { m[k] = vals[0] } - return New(m), nil } @@ -210,13 +185,9 @@ func FromURLQuery(query string) (Map, error) { // // Panics if it encounters an error func MustFromURLQuery(query string) Map { - o, err := FromURLQuery(query) - if err != nil { panic("objx: MustFromURLQuery failed with error: " + err.Error()) } - return o - } diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go index b35c86392..e7b8eb794 100644 --- a/vendor/github.com/stretchr/objx/mutations.go +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -2,11 +2,10 @@ package objx // Exclude returns a new Map with the keys in the specified []string // excluded. -func (d Map) Exclude(exclude []string) Map { - +func (m Map) Exclude(exclude []string) Map { excluded := make(Map) - for k, v := range d { - var shouldInclude bool = true + for k, v := range m { + var shouldInclude = true for _, toExclude := range exclude { if k == toExclude { shouldInclude = false @@ -17,7 +16,6 @@ func (d Map) Exclude(exclude []string) Map { excluded[k] = v } } - return excluded } @@ -38,19 +36,16 @@ func (m Map) Merge(merge Map) Map { return m.Copy().MergeHere(merge) } -// Merge blends the specified map with this map and returns the current map. 
+// MergeHere blends the specified map with this map and returns the current map. // -// Keys that appear in both will be selected from the specified map. The original map +// Keys that appear in both will be selected from the specified map. The original map // will be modified. This method requires that // the wrapped object be a map[string]interface{} func (m Map) MergeHere(merge Map) Map { - for k, v := range merge { m[k] = v } - return m - } // Transform builds a new Obj giving the transformer a chance @@ -71,11 +66,9 @@ func (m Map) Transform(transformer func(key string, value interface{}) (string, // This method requires that the wrapped object be a map[string]interface{} func (m Map) TransformKeys(mapping map[string]string) Map { return m.Transform(func(key string, value interface{}) (string, interface{}) { - if newKey, ok := mapping[key]; ok { return newKey, value } - return key, value }) } diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go index fdd6be9cf..e052ff890 100644 --- a/vendor/github.com/stretchr/objx/security.go +++ b/vendor/github.com/stretchr/objx/security.go @@ -9,6 +9,9 @@ import ( // key. func HashWithKey(data, key string) string { hash := sha1.New() - hash.Write([]byte(data + ":" + key)) + _, err := hash.Write([]byte(data + ":" + key)) + if err != nil { + return "" + } return hex.EncodeToString(hash.Sum(nil)) } diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go index f3ecb29b9..202a91f8c 100644 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -2,7 +2,6 @@ package objx /* Inter (interface{} and []interface{}) - -------------------------------------------------- */ // Inter gets the value as a interface{}, returns the optionalDefault @@ -60,44 +59,35 @@ func (v *Value) IsInterSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - for index, val := range v.MustInterSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInter uses the specified decider function to select items // from the []interface{}. The object contained in the result will contain // only the selected items. func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - var selected []interface{} - v.EachInter(func(index int, val interface{}) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInter uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]interface{}. func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - groups := make(map[string][]interface{}) - v.EachInter(func(index int, val interface{}) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -106,47 +96,37 @@ func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInter uses the specified function to replace each interface{}s // by iterating each item. The data in the returned result will be a // []interface{} containing the replaced items. 
func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() replaced := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInter uses the specified collector function to collect a value // for each of the interface{}s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() collected := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* MSI (map[string]interface{} and []map[string]interface{}) - -------------------------------------------------- */ // MSI gets the value as a map[string]interface{}, returns the optionalDefault @@ -204,44 +184,35 @@ func (v *Value) IsMSISlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - for index, val := range v.MustMSISlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereMSI uses the specified decider function to select items // from the []map[string]interface{}. The object contained in the result will contain // only the selected items. func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - var selected []map[string]interface{} - v.EachMSI(func(index int, val map[string]interface{}) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupMSI uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]map[string]interface{}. func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - groups := make(map[string][]map[string]interface{}) - v.EachMSI(func(index int, val map[string]interface{}) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -250,47 +221,37 @@ func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Valu groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceMSI uses the specified function to replace each map[string]interface{}s // by iterating each item. The data in the returned result will be a // []map[string]interface{} containing the replaced items. func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - arr := v.MustMSISlice() replaced := make([]map[string]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectMSI uses the specified collector function to collect a value // for each of the map[string]interface{}s in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - arr := v.MustMSISlice() collected := make([]interface{}, len(arr)) - v.EachMSI(func(index int, val map[string]interface{}) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* ObjxMap ((Map) and [](Map)) - -------------------------------------------------- */ // ObjxMap gets the value as a (Map), returns the optionalDefault @@ -348,44 +309,35 @@ func (v *Value) IsObjxMapSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - for index, val := range v.MustObjxMapSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereObjxMap uses the specified decider function to select items // from the [](Map). The object contained in the result will contain // only the selected items. func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - var selected [](Map) - v.EachObjxMap(func(index int, val Map) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupObjxMap uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][](Map). func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - groups := make(map[string][](Map)) - v.EachObjxMap(func(index int, val Map) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -394,47 +346,37 @@ func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceObjxMap uses the specified function to replace each (Map)s // by iterating each item. The data in the returned result will be a // [](Map) containing the replaced items. func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - arr := v.MustObjxMapSlice() replaced := make([](Map), len(arr)) - v.EachObjxMap(func(index int, val Map) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectObjxMap uses the specified collector function to collect a value // for each of the (Map)s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - arr := v.MustObjxMapSlice() collected := make([]interface{}, len(arr)) - v.EachObjxMap(func(index int, val Map) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Bool (bool and []bool) - -------------------------------------------------- */ // Bool gets the value as a bool, returns the optionalDefault @@ -492,44 +434,35 @@ func (v *Value) IsBoolSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachBool(callback func(int, bool) bool) *Value { - for index, val := range v.MustBoolSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereBool uses the specified decider function to select items // from the []bool. The object contained in the result will contain // only the selected items. 
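//
// Note that, as in every Where* body in this generated file, a value is
// appended when the decider returns false, so a sketch like the following
// (hypothetical data) keeps the items the decider rejected:
//
//	m := objx.Map{"flags": []bool{true, false, true}}
//	kept := m.Get("flags").WhereBool(func(i int, b bool) bool { return b })
//	// kept wraps []bool{false}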
func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - var selected []bool - v.EachBool(func(index int, val bool) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupBool uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]bool. func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - groups := make(map[string][]bool) - v.EachBool(func(index int, val bool) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -538,47 +471,37 @@ func (v *Value) GroupBool(grouper func(int, bool) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceBool uses the specified function to replace each bools // by iterating each item. The data in the returned result will be a // []bool containing the replaced items. func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - arr := v.MustBoolSlice() replaced := make([]bool, len(arr)) - v.EachBool(func(index int, val bool) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectBool uses the specified collector function to collect a value // for each of the bools in the slice. The data returned will be a // []interface{}. func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - arr := v.MustBoolSlice() collected := make([]interface{}, len(arr)) - v.EachBool(func(index int, val bool) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Str (string and []string) - -------------------------------------------------- */ // Str gets the value as a string, returns the optionalDefault @@ -636,44 +559,35 @@ func (v *Value) IsStrSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachStr(callback func(int, string) bool) *Value { - for index, val := range v.MustStrSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereStr uses the specified decider function to select items // from the []string. The object contained in the result will contain // only the selected items. func (v *Value) WhereStr(decider func(int, string) bool) *Value { - var selected []string - v.EachStr(func(index int, val string) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupStr uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]string. func (v *Value) GroupStr(grouper func(int, string) string) *Value { - groups := make(map[string][]string) - v.EachStr(func(index int, val string) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -682,47 +596,37 @@ func (v *Value) GroupStr(grouper func(int, string) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceStr uses the specified function to replace each strings // by iterating each item. The data in the returned result will be a // []string containing the replaced items. 
func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - arr := v.MustStrSlice() replaced := make([]string, len(arr)) - v.EachStr(func(index int, val string) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectStr uses the specified collector function to collect a value // for each of the strings in the slice. The data returned will be a // []interface{}. func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - arr := v.MustStrSlice() collected := make([]interface{}, len(arr)) - v.EachStr(func(index int, val string) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int (int and []int) - -------------------------------------------------- */ // Int gets the value as a int, returns the optionalDefault @@ -780,44 +684,35 @@ func (v *Value) IsIntSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt(callback func(int, int) bool) *Value { - for index, val := range v.MustIntSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt uses the specified decider function to select items // from the []int. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt(decider func(int, int) bool) *Value { - var selected []int - v.EachInt(func(index int, val int) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int. func (v *Value) GroupInt(grouper func(int, int) string) *Value { - groups := make(map[string][]int) - v.EachInt(func(index int, val int) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -826,47 +721,37 @@ func (v *Value) GroupInt(grouper func(int, int) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt uses the specified function to replace each ints // by iterating each item. The data in the returned result will be a // []int containing the replaced items. func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - arr := v.MustIntSlice() replaced := make([]int, len(arr)) - v.EachInt(func(index int, val int) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt uses the specified collector function to collect a value // for each of the ints in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - arr := v.MustIntSlice() collected := make([]interface{}, len(arr)) - v.EachInt(func(index int, val int) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int8 (int8 and []int8) - -------------------------------------------------- */ // Int8 gets the value as a int8, returns the optionalDefault @@ -924,44 +809,35 @@ func (v *Value) IsInt8Slice() bool { // // Panics if the object is the wrong type. 
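//
// A short sketch (hypothetical data); returning false from the callback stops
// the iteration early:
//
//	m := objx.Map{"nums": []int8{10, 20, 30}}
//	m.Get("nums").EachInt8(func(i int, n int8) bool {
//		fmt.Println(i, n)
//		return i < 1 // visits 10 and 20, then stops
//	})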
func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - for index, val := range v.MustInt8Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt8 uses the specified decider function to select items // from the []int8. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - var selected []int8 - v.EachInt8(func(index int, val int8) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt8 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int8. func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - groups := make(map[string][]int8) - v.EachInt8(func(index int, val int8) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -970,47 +846,37 @@ func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt8 uses the specified function to replace each int8s // by iterating each item. The data in the returned result will be a // []int8 containing the replaced items. func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - arr := v.MustInt8Slice() replaced := make([]int8, len(arr)) - v.EachInt8(func(index int, val int8) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt8 uses the specified collector function to collect a value // for each of the int8s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - arr := v.MustInt8Slice() collected := make([]interface{}, len(arr)) - v.EachInt8(func(index int, val int8) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int16 (int16 and []int16) - -------------------------------------------------- */ // Int16 gets the value as a int16, returns the optionalDefault @@ -1068,44 +934,35 @@ func (v *Value) IsInt16Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - for index, val := range v.MustInt16Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt16 uses the specified decider function to select items // from the []int16. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - var selected []int16 - v.EachInt16(func(index int, val int16) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt16 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int16. 
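//
// For instance (illustrative values only):
//
//	m := objx.Map{"nums": []int16{-2, 3, -5}}
//	bySign := m.Get("nums").GroupInt16(func(i int, n int16) string {
//		if n < 0 {
//			return "negative"
//		}
//		return "non-negative"
//	})
//	// bySign wraps map[string][]int16{"negative": {-2, -5}, "non-negative": {3}}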
func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - groups := make(map[string][]int16) - v.EachInt16(func(index int, val int16) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1114,47 +971,37 @@ func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt16 uses the specified function to replace each int16s // by iterating each item. The data in the returned result will be a // []int16 containing the replaced items. func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - arr := v.MustInt16Slice() replaced := make([]int16, len(arr)) - v.EachInt16(func(index int, val int16) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt16 uses the specified collector function to collect a value // for each of the int16s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - arr := v.MustInt16Slice() collected := make([]interface{}, len(arr)) - v.EachInt16(func(index int, val int16) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int32 (int32 and []int32) - -------------------------------------------------- */ // Int32 gets the value as a int32, returns the optionalDefault @@ -1212,44 +1059,35 @@ func (v *Value) IsInt32Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - for index, val := range v.MustInt32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt32 uses the specified decider function to select items // from the []int32. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - var selected []int32 - v.EachInt32(func(index int, val int32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int32. func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - groups := make(map[string][]int32) - v.EachInt32(func(index int, val int32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1258,47 +1096,37 @@ func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt32 uses the specified function to replace each int32s // by iterating each item. The data in the returned result will be a // []int32 containing the replaced items. func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - arr := v.MustInt32Slice() replaced := make([]int32, len(arr)) - v.EachInt32(func(index int, val int32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt32 uses the specified collector function to collect a value // for each of the int32s in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - arr := v.MustInt32Slice() collected := make([]interface{}, len(arr)) - v.EachInt32(func(index int, val int32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Int64 (int64 and []int64) - -------------------------------------------------- */ // Int64 gets the value as a int64, returns the optionalDefault @@ -1356,44 +1184,35 @@ func (v *Value) IsInt64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - for index, val := range v.MustInt64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt64 uses the specified decider function to select items // from the []int64. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - var selected []int64 - v.EachInt64(func(index int, val int64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int64. func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - groups := make(map[string][]int64) - v.EachInt64(func(index int, val int64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1402,47 +1221,37 @@ func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt64 uses the specified function to replace each int64s // by iterating each item. The data in the returned result will be a // []int64 containing the replaced items. func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - arr := v.MustInt64Slice() replaced := make([]int64, len(arr)) - v.EachInt64(func(index int, val int64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt64 uses the specified collector function to collect a value // for each of the int64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - arr := v.MustInt64Slice() collected := make([]interface{}, len(arr)) - v.EachInt64(func(index int, val int64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint (uint and []uint) - -------------------------------------------------- */ // Uint gets the value as a uint, returns the optionalDefault @@ -1500,44 +1309,35 @@ func (v *Value) IsUintSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint(callback func(int, uint) bool) *Value { - for index, val := range v.MustUintSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint uses the specified decider function to select items // from the []uint. The object contained in the result will contain // only the selected items. 
func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - var selected []uint - v.EachUint(func(index int, val uint) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint. func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - groups := make(map[string][]uint) - v.EachUint(func(index int, val uint) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1546,47 +1346,37 @@ func (v *Value) GroupUint(grouper func(int, uint) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint uses the specified function to replace each uints // by iterating each item. The data in the returned result will be a // []uint containing the replaced items. func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - arr := v.MustUintSlice() replaced := make([]uint, len(arr)) - v.EachUint(func(index int, val uint) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint uses the specified collector function to collect a value // for each of the uints in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - arr := v.MustUintSlice() collected := make([]interface{}, len(arr)) - v.EachUint(func(index int, val uint) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint8 (uint8 and []uint8) - -------------------------------------------------- */ // Uint8 gets the value as a uint8, returns the optionalDefault @@ -1644,44 +1434,35 @@ func (v *Value) IsUint8Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - for index, val := range v.MustUint8Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint8 uses the specified decider function to select items // from the []uint8. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - var selected []uint8 - v.EachUint8(func(index int, val uint8) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint8 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint8. func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - groups := make(map[string][]uint8) - v.EachUint8(func(index int, val uint8) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1690,47 +1471,37 @@ func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint8 uses the specified function to replace each uint8s // by iterating each item. The data in the returned result will be a // []uint8 containing the replaced items. 
func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - arr := v.MustUint8Slice() replaced := make([]uint8, len(arr)) - v.EachUint8(func(index int, val uint8) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint8 uses the specified collector function to collect a value // for each of the uint8s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - arr := v.MustUint8Slice() collected := make([]interface{}, len(arr)) - v.EachUint8(func(index int, val uint8) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint16 (uint16 and []uint16) - -------------------------------------------------- */ // Uint16 gets the value as a uint16, returns the optionalDefault @@ -1788,44 +1559,35 @@ func (v *Value) IsUint16Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - for index, val := range v.MustUint16Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint16 uses the specified decider function to select items // from the []uint16. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - var selected []uint16 - v.EachUint16(func(index int, val uint16) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint16 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint16. func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - groups := make(map[string][]uint16) - v.EachUint16(func(index int, val uint16) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1834,47 +1596,37 @@ func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint16 uses the specified function to replace each uint16s // by iterating each item. The data in the returned result will be a // []uint16 containing the replaced items. func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - arr := v.MustUint16Slice() replaced := make([]uint16, len(arr)) - v.EachUint16(func(index int, val uint16) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint16 uses the specified collector function to collect a value // for each of the uint16s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - arr := v.MustUint16Slice() collected := make([]interface{}, len(arr)) - v.EachUint16(func(index int, val uint16) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint32 (uint32 and []uint32) - -------------------------------------------------- */ // Uint32 gets the value as a uint32, returns the optionalDefault @@ -1932,44 +1684,35 @@ func (v *Value) IsUint32Slice() bool { // // Panics if the object is the wrong type. 
func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - for index, val := range v.MustUint32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint32 uses the specified decider function to select items // from the []uint32. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - var selected []uint32 - v.EachUint32(func(index int, val uint32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint32. func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - groups := make(map[string][]uint32) - v.EachUint32(func(index int, val uint32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1978,47 +1721,37 @@ func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint32 uses the specified function to replace each uint32s // by iterating each item. The data in the returned result will be a // []uint32 containing the replaced items. func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - arr := v.MustUint32Slice() replaced := make([]uint32, len(arr)) - v.EachUint32(func(index int, val uint32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint32 uses the specified collector function to collect a value // for each of the uint32s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - arr := v.MustUint32Slice() collected := make([]interface{}, len(arr)) - v.EachUint32(func(index int, val uint32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uint64 (uint64 and []uint64) - -------------------------------------------------- */ // Uint64 gets the value as a uint64, returns the optionalDefault @@ -2076,44 +1809,35 @@ func (v *Value) IsUint64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - for index, val := range v.MustUint64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint64 uses the specified decider function to select items // from the []uint64. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - var selected []uint64 - v.EachUint64(func(index int, val uint64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint64. 
func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - groups := make(map[string][]uint64) - v.EachUint64(func(index int, val uint64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2122,47 +1846,37 @@ func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint64 uses the specified function to replace each uint64s // by iterating each item. The data in the returned result will be a // []uint64 containing the replaced items. func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - arr := v.MustUint64Slice() replaced := make([]uint64, len(arr)) - v.EachUint64(func(index int, val uint64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint64 uses the specified collector function to collect a value // for each of the uint64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - arr := v.MustUint64Slice() collected := make([]interface{}, len(arr)) - v.EachUint64(func(index int, val uint64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Uintptr (uintptr and []uintptr) - -------------------------------------------------- */ // Uintptr gets the value as a uintptr, returns the optionalDefault @@ -2220,44 +1934,35 @@ func (v *Value) IsUintptrSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - for index, val := range v.MustUintptrSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUintptr uses the specified decider function to select items // from the []uintptr. The object contained in the result will contain // only the selected items. func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - var selected []uintptr - v.EachUintptr(func(index int, val uintptr) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUintptr uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uintptr. func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - groups := make(map[string][]uintptr) - v.EachUintptr(func(index int, val uintptr) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2266,47 +1971,37 @@ func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUintptr uses the specified function to replace each uintptrs // by iterating each item. The data in the returned result will be a // []uintptr containing the replaced items. func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - arr := v.MustUintptrSlice() replaced := make([]uintptr, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUintptr uses the specified collector function to collect a value // for each of the uintptrs in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - arr := v.MustUintptrSlice() collected := make([]interface{}, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Float32 (float32 and []float32) - -------------------------------------------------- */ // Float32 gets the value as a float32, returns the optionalDefault @@ -2364,44 +2059,35 @@ func (v *Value) IsFloat32Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - for index, val := range v.MustFloat32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereFloat32 uses the specified decider function to select items // from the []float32. The object contained in the result will contain // only the selected items. func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - var selected []float32 - v.EachFloat32(func(index int, val float32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupFloat32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]float32. func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - groups := make(map[string][]float32) - v.EachFloat32(func(index int, val float32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2410,47 +2096,37 @@ func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceFloat32 uses the specified function to replace each float32s // by iterating each item. The data in the returned result will be a // []float32 containing the replaced items. func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - arr := v.MustFloat32Slice() replaced := make([]float32, len(arr)) - v.EachFloat32(func(index int, val float32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectFloat32 uses the specified collector function to collect a value // for each of the float32s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - arr := v.MustFloat32Slice() collected := make([]interface{}, len(arr)) - v.EachFloat32(func(index int, val float32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Float64 (float64 and []float64) - -------------------------------------------------- */ // Float64 gets the value as a float64, returns the optionalDefault @@ -2508,44 +2184,35 @@ func (v *Value) IsFloat64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - for index, val := range v.MustFloat64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereFloat64 uses the specified decider function to select items // from the []float64. The object contained in the result will contain // only the selected items. 
func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - var selected []float64 - v.EachFloat64(func(index int, val float64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupFloat64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]float64. func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - groups := make(map[string][]float64) - v.EachFloat64(func(index int, val float64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2554,47 +2221,37 @@ func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceFloat64 uses the specified function to replace each float64s // by iterating each item. The data in the returned result will be a // []float64 containing the replaced items. func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - arr := v.MustFloat64Slice() replaced := make([]float64, len(arr)) - v.EachFloat64(func(index int, val float64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectFloat64 uses the specified collector function to collect a value // for each of the float64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - arr := v.MustFloat64Slice() collected := make([]interface{}, len(arr)) - v.EachFloat64(func(index int, val float64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Complex64 (complex64 and []complex64) - -------------------------------------------------- */ // Complex64 gets the value as a complex64, returns the optionalDefault @@ -2652,44 +2309,35 @@ func (v *Value) IsComplex64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - for index, val := range v.MustComplex64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereComplex64 uses the specified decider function to select items // from the []complex64. The object contained in the result will contain // only the selected items. func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - var selected []complex64 - v.EachComplex64(func(index int, val complex64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupComplex64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]complex64. 
func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - groups := make(map[string][]complex64) - v.EachComplex64(func(index int, val complex64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2698,47 +2346,37 @@ func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceComplex64 uses the specified function to replace each complex64s // by iterating each item. The data in the returned result will be a // []complex64 containing the replaced items. func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - arr := v.MustComplex64Slice() replaced := make([]complex64, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectComplex64 uses the specified collector function to collect a value // for each of the complex64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - arr := v.MustComplex64Slice() collected := make([]interface{}, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* Complex128 (complex128 and []complex128) - -------------------------------------------------- */ // Complex128 gets the value as a complex128, returns the optionalDefault @@ -2796,44 +2434,35 @@ func (v *Value) IsComplex128Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - for index, val := range v.MustComplex128Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereComplex128 uses the specified decider function to select items // from the []complex128. The object contained in the result will contain // only the selected items. func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - var selected []complex128 - v.EachComplex128(func(index int, val complex128) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupComplex128 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]complex128. func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - groups := make(map[string][]complex128) - v.EachComplex128(func(index int, val complex128) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2842,40 +2471,31 @@ func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceComplex128 uses the specified function to replace each complex128s // by iterating each item. The data in the returned result will be a // []complex128 containing the replaced items. 
func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - arr := v.MustComplex128Slice() replaced := make([]complex128, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectComplex128 uses the specified collector function to collect a value // for each of the complex128s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - arr := v.MustComplex128Slice() collected := make([]interface{}, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go index 7aaef06b1..956a2211d 100644 --- a/vendor/github.com/stretchr/objx/value.go +++ b/vendor/github.com/stretchr/objx/value.go @@ -1,5 +1,10 @@ package objx +import ( + "fmt" + "strconv" +) + // Value provides methods for extracting interface{} data in various // types. type Value struct { @@ -11,3 +16,39 @@ type Value struct { func (v *Value) Data() interface{} { return v.data } + +// String always returns the value as a string +func (v *Value) String() string { + switch { + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + + return fmt.Sprintf("%#v", v.Data()) +} diff --git a/vendor/github.com/stretchr/testify/LICENCE.txt b/vendor/github.com/stretchr/testify/LICENCE.txt deleted file mode 100644 index 473b670a7..000000000 --- a/vendor/github.com/stretchr/testify/LICENCE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 000000000..ae06a54e2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,349 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return DirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to the specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e.
not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertible to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through the supplied TestingT. +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// FailNowf fails the test immediately. +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + return FileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false).
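+// Like every ...f helper in this generated file, the wrapper below only
+// prepends msg to the variadic args before delegating, so a sketch such as
+//
+//	assert.HTTPBodyNotContainsf(t, myHandler, "GET", "/health", nil, "secret", "leak on %s", "/health")
+//
+// (myHandler being a hypothetical http.HandlerFunc) behaves exactly like
+// HTTPBodyNotContains(t, myHandler, "GET", "/health", nil, "secret", "leak on %s", "/health").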
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object implements the specified interface. +// +// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted") +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+//   assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+	return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+//   assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+	return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+//   assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.NoErrorf(t, err, "error message %s", "formatted") {
+//     assert.Equal(t, expectedObj, actualObj)
+//   }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+	return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//   assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+//   assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+//   assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//   if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+//     assert.Equal(t, "two", obj[1])
+//   }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). 
+// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + return Zero(t, i, append([]interface{}{msg}, args...)...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index aa4311ff8..ffa5428f3 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -16,36 +16,82 @@ func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool return Condition(a.t, comp, msgAndArgs...) } +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + return Conditionf(a.t, comp, msg, args...) +} + // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { return Contains(a.t, s, contains, msgAndArgs...) } +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. 
It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + return DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + return ElementsMatchf(a.t, listA, listB, msg, args...) +} + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // // a.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + return Emptyf(a.t, object, msg, args...) +} + // Equal asserts that two objects are equal. // -// a.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Equal(123, 123) // // Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return Equal(a.t, expected, actual, msgAndArgs...) } @@ -54,44 +100,81 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString, "An error was expected") -// -// Returns whether the assertion was successful (true) or not (false). +// a.EqualError(err, expectedErrorString) func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { return EqualError(a.t, theError, errString, msgAndArgs...) } +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + return EqualErrorf(a.t, theError, errString, msg, args...) 
+}
+
 // EqualValues asserts that two objects are equal or convertible to the same types
 // and equal.
 //
-// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// a.EqualValues(uint32(123), int32(123))
 func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	return EqualValues(a.t, expected, actual, msgAndArgs...)
 }
 
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//   a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+//   a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	return Equalf(a.t, expected, actual, msg, args...)
+}
+
 // Error asserts that a function returned an error (i.e. not `nil`).
 //
 //   actualObj, err := SomeFunction()
-//   if a.Error(err, "An error was expected") {
-//     assert.Equal(t, err, expectedError)
+//   if a.Error(err) {
+//     assert.Equal(t, expectedError, err)
 //   }
-//
-// Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
 	return Error(a.t, err, msgAndArgs...)
 }
 
-// Exactly asserts that two objects are equal is value and type.
+// Errorf asserts that a function returned an error (i.e. not `nil`).
 //
-// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
+//   actualObj, err := SomeFunction()
+//   if a.Errorf(err, "error message %s", "formatted") {
+//     assert.Equal(t, expectedErrorf, err)
+//   }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+	return Errorf(a.t, err, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
 //
-// Returns whether the assertion was successful (true) or not (false).
+//   a.Exactly(int32(123), int64(123))
 func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	return Exactly(a.t, expected, actual, msgAndArgs...)
 }
 
+// Exactlyf asserts that two objects are equal in value and type.
+//
+//   a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
 // Fail reports a failure through the provided TestingT.
 func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
 	return Fail(a.t, failureMessage, msgAndArgs...)
@@ -102,23 +185,58 @@ func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) b
 	return FailNow(a.t, failureMessage, msgAndArgs...)
 }
 
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+	return FailNowf(a.t, failureMessage, msg, args...)
+} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + return Failf(a.t, failureMessage, msg, args...) +} + // False asserts that the specified value is false. // -// a.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). +// a.False(myBool) func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { return False(a.t, value, msgAndArgs...) } +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + return Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + return FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + return FileExistsf(a.t, path, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // // a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, method, url, values, str) +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPBodyNotContains asserts that a specified handler returns a @@ -127,8 +245,18 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u // a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, method, url, values, str) +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. 
+// +// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPError asserts that a specified handler returns an error status code. @@ -136,8 +264,17 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string // a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPError(a.t, handler, method, url, values) +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPError(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPErrorf(a.t, handler, method, url, values, msg, args...) } // HTTPRedirect asserts that a specified handler returns a redirect status code. @@ -145,8 +282,17 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, method, url, values) +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) } // HTTPSuccess asserts that a specified handler returns a success status code. @@ -154,34 +300,68 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, method, url, values) +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) } // Implements asserts that an object is implemented by the specified interface. // -// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +// a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { return Implements(a.t, interfaceObject, object, msgAndArgs...) } +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + return Implementsf(a.t, interfaceObject, object, msg, args...) +} + // InDelta asserts that the two numerals are within delta of each other. // // a.InDelta(math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { return InDelta(a.t, expected, actual, delta, msgAndArgs...) } +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + // InDeltaSlice is the same as InDelta, except it compares two slices. func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) } -// InEpsilon asserts that expected and actual have a relative error less than epsilon +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. // -// Returns whether the assertion was successful (true) or not (false). 
+// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + return InDeltaf(a.t, expected, actual, delta, msg, args...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) } @@ -191,80 +371,133 @@ func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, ep return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) } +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { return IsType(a.t, expectedType, object, msgAndArgs...) } +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + return IsTypef(a.t, expectedType, object, msg, args...) +} + // JSONEq asserts that two JSON strings are equivalent. // // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { return JSONEq(a.t, expected, actual, msgAndArgs...) } +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + return JSONEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// a.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Len(mySlice, 3) func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { return Len(a.t, object, length, msgAndArgs...) } +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + return Lenf(a.t, object, length, msg, args...) +} + // Nil asserts that the specified object is nil. // -// a.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). 
+// a.Nil(err) func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { return Nil(a.t, object, msgAndArgs...) } +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + return Nilf(a.t, object, msg, args...) +} + // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() // if a.NoError(err) { -// assert.Equal(t, actualObj, expectedObj) +// assert.Equal(t, expectedObj, actualObj) // } -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { return NoError(a.t, err, msgAndArgs...) } +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + return NoErrorf(a.t, err, msg, args...) +} + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { return NotContains(a.t, s, contains, msgAndArgs...) } +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + return NotContainsf(a.t, s, contains, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) // } -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { return NotEmpty(a.t, object, msgAndArgs...) } +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + return NotEmptyf(a.t, object, msg, args...) +} + // NotEqual asserts that the specified values are NOT equal. // -// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). 
+// a.NotEqual(obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -272,81 +505,182 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr return NotEqual(a.t, expected, actual, msgAndArgs...) } +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + return NotEqualf(a.t, expected, actual, msg, args...) +} + // NotNil asserts that the specified object is not nil. // -// a.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). +// a.NotNil(err) func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { return NotNil(a.t, object, msgAndArgs...) } +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + return NotNilf(a.t, object, msg, args...) +} + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// a.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). +// a.NotPanics(func(){ RemainCalm() }) func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return NotPanics(a.t, f, msgAndArgs...) } +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + return NotPanicsf(a.t, f, msg, args...) +} + // NotRegexp asserts that a specified regexp does not match a string. // // a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp("^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { return NotRegexp(a.t, rx, str, msgAndArgs...) } -// NotZero asserts that i is not the zero value for its type and returns the truth. +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + return NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). 
+// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { return NotZero(a.t, i, msgAndArgs...) } +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { + return NotZerof(a.t, i, msg, args...) +} + // Panics asserts that the code inside the specified PanicTestFunc panics. // -// a.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Panics(func(){ GoCrazy() }) func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return Panics(a.t, f, msgAndArgs...) } +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + return PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + return PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + return Panicsf(a.t, f, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. // // a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp("start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { return Regexp(a.t, rx, str, msgAndArgs...) } +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + return Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + return Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). 
+// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + return Subsetf(a.t, list, subset, msg, args...) +} + // True asserts that the specified value is true. // -// a.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). +// a.True(myBool) func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { return True(a.t, value, msgAndArgs...) } +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + return Truef(a.t, value, msg, args...) +} + // WithinDuration asserts that the two times are within duration delta of each other. // -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) } -// Zero asserts that i is the zero value for its type and returns the truth. +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + return WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { return Zero(a.t, i, msgAndArgs...) } + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + return Zerof(a.t, i, msg, args...) 
+} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index d1552e5e3..47bda7786 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -4,8 +4,10 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "math" + "os" "reflect" "regexp" "runtime" @@ -18,6 +20,8 @@ import ( "github.com/pmezard/go-difflib/difflib" ) +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl + // TestingT is an interface wrapper around *testing.T type TestingT interface { Errorf(format string, args ...interface{}) @@ -38,7 +42,15 @@ func ObjectsAreEqual(expected, actual interface{}) bool { if expected == nil || actual == nil { return expected == actual } - + if exp, ok := expected.([]byte); ok { + act, ok := actual.([]byte) + if !ok { + return false + } else if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) + } return reflect.DeepEqual(expected, actual) } @@ -108,10 +120,12 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - dir := parts[len(parts)-2] file = parts[len(parts)-1] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + if len(parts) > 1 { + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } } // Drop the package @@ -181,7 +195,7 @@ func indentMessageLines(message string, longestLabelLen int) string { // no need to align first line because it starts at the correct location (after the label) if i != 0 { // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen +1) + "\t") + outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") } outBuf.WriteString(scanner.Text()) } @@ -218,18 +232,25 @@ func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { {"Error", failureMessage}, } + // Add test name if the Go version supports it + if n, ok := t.(interface { + Name() string + }); ok { + content = append(content, labeledContent{"Test", n.Name()}) + } + message := messageFromMsgAndArgs(msgAndArgs...) if len(message) > 0 { content = append(content, labeledContent{"Messages", message}) } - t.Errorf("\r" + getWhitespaceString() + labeledOutput(content...)) + t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...)) return false } type labeledContent struct { - label string + label string content string } @@ -258,17 +279,18 @@ func labeledOutput(content ...labeledContent) string { // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - interfaceType := reflect.TypeOf(interfaceObject).Elem() + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) + } if !reflect.TypeOf(object).Implements(interfaceType) { return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) 
} return true - } // IsType asserts that the specified objects are of the same type. @@ -283,20 +305,23 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", + expected, actual, err), msgAndArgs...) + } if !ObjectsAreEqual(expected, actual) { diff := diff(expected, actual) expected, actual = formatUnequalValues(expected, actual) return Fail(t, fmt.Sprintf("Not equal: \n"+ "expected: %s\n"+ - "received: %s%s", expected, actual, diff), msgAndArgs...) + "actual : %s%s", expected, actual, diff), msgAndArgs...) } return true @@ -322,9 +347,7 @@ func formatUnequalValues(expected, actual interface{}) (e string, a string) { // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if !ObjectsAreEqualValues(expected, actual) { @@ -332,18 +355,16 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa expected, actual = formatUnequalValues(expected, actual) return Fail(t, fmt.Sprintf("Not equal: \n"+ "expected: %s\n"+ - "received: %s%s", expected, actual, diff), msgAndArgs...) + "actual : %s%s", expected, actual, diff), msgAndArgs...) } return true } -// Exactly asserts that two objects are equal is value and type. +// Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { aType := reflect.TypeOf(expected) @@ -359,9 +380,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { if !isNil(object) { return true @@ -386,9 +405,7 @@ func isNil(object interface{}) bool { // Nil asserts that the specified object is nil. // -// assert.Nil(t, err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). 
+// assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { if isNil(object) { return true @@ -396,74 +413,38 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) } -var numericZeros = []interface{}{ - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - float32(0), - float64(0), -} - // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { + // get nil case out of the way if object == nil { return true - } else if object == "" { - return true - } else if object == false { - return true - } - - for _, v := range numericZeros { - if object == v { - return true - } } objValue := reflect.ValueOf(object) switch objValue.Kind() { - case reflect.Map: - fallthrough - case reflect.Slice, reflect.Chan: - { - return (objValue.Len() == 0) - } - case reflect.Struct: - switch object.(type) { - case time.Time: - return object.(time.Time).IsZero() - } + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: - { - if objValue.IsNil() { - return true - } - switch object.(type) { - case *time.Time: - return object.(*time.Time).IsZero() - default: - return false - } + if objValue.IsNil() { + return true } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) } - return false } // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // // assert.Empty(t, obj) -// -// Returns whether the assertion was successful (true) or not (false). func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) @@ -481,8 +462,6 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) // } -// -// Returns whether the assertion was successful (true) or not (false). func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := !isEmpty(object) @@ -509,9 +488,7 @@ func getLen(x interface{}) (ok bool, length int) { // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// assert.Len(t, mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { ok, l := getLen(object) if !ok { @@ -526,9 +503,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // True asserts that the specified value is true. // -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { if value != true { @@ -541,9 +516,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { // False asserts that the specified value is false. 
// -// assert.False(t, myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { if value != false { @@ -556,13 +529,15 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", + expected, actual, err), msgAndArgs...) + } if ObjectsAreEqual(expected, actual) { return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) @@ -613,11 +588,9 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) { // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { ok, found := includeElement(s, contains) @@ -635,11 +608,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { ok, found := includeElement(s, contains) @@ -654,6 +625,142 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). 
+// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return true // we consider nil to be equal to the nil set + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + } + } + + return true +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if subset == nil { + return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if isEmpty(listA) && isEmpty(listB) { + return true + } + + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() + + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) + } + + if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) 
+ } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { result := comp() @@ -691,11 +798,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { @@ -705,13 +808,26 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { return true } +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + funcDidPanic, panicValue := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...) + } + + return true +} + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { if funcDidPanic, panicValue := didPanic(f); funcDidPanic { @@ -723,9 +839,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // WithinDuration asserts that the two times are within duration delta of each other. // -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). 
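Taken together, the collection assertions added above behave as in this minimal, hypothetical test (package name and values are invented for illustration; this sketch is not part of the patch):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Illustrative only: exercises the Subset, NotSubset and ElementsMatch
// assertions introduced by this vendor update.
func TestListAssertions(t *testing.T) {
	list := []int{1, 2, 3}

	assert.Subset(t, list, []int{1, 3}) // every element of the subset is in list
	assert.NotSubset(t, list, []int{4}) // 4 is missing from list, so this passes

	// Order is ignored, but duplicate counts must match.
	assert.ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2})
}
```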
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { dt := expected.Sub(actual) @@ -763,6 +877,8 @@ func toFloat(x interface{}) (float64, bool) { xf = float64(xn) case float64: xf = float64(xn) + case time.Duration: + xf = float64(xn) default: xok = false } @@ -773,8 +889,6 @@ func toFloat(x interface{}) (float64, bool) { // InDelta asserts that the two numerals are within delta of each other. // // assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { af, aok := toFloat(expected) @@ -785,7 +899,7 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) } if math.IsNaN(bf) { @@ -812,7 +926,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn expectedSlice := reflect.ValueOf(expected) for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) if !result { return result } @@ -821,6 +935,47 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn return true } +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) + } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) + } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) + } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) if !aok { @@ -831,15 +986,13 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { } bf, bok := toFloat(actual) if !bok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) } return math.Abs(af-bf) / math.Abs(af), nil } // InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). 
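Because toFloat now understands time.Duration, the InDelta family can compare durations directly, and the new InDeltaMapValues walks two maps key by key. A hedged sketch of both (the values are illustrative, not from the patch):

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// Illustrative only: durations are converted to float64 nanoseconds by
// toFloat, and InDeltaMapValues requires both maps to share exactly the
// same keys.
func TestDeltaAssertions(t *testing.T) {
	// |2s - 2.1s| = 100ms, which is within the 200ms delta.
	assert.InDelta(t, 2*time.Second, 2100*time.Millisecond, float64(200*time.Millisecond))

	expected := map[string]float64{"p50": 0.10, "p99": 0.50}
	actual := map[string]float64{"p50": 0.11, "p99": 0.49}
	assert.InDeltaMapValues(t, expected, actual, 0.02) // each value differs by 0.01
}
```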
func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -847,7 +1000,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) } return true @@ -882,10 +1035,8 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // // actualObj, err := SomeFunction() // if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) +// assert.Equal(t, expectedObj, actualObj) // } -// -// Returns whether the assertion was successful (true) or not (false). func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { if err != nil { return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) @@ -897,11 +1048,9 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) // } -// -// Returns whether the assertion was successful (true) or not (false). func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { @@ -915,9 +1064,7 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString, "An error was expected") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { if !Error(t, theError, msgAndArgs...) { return false @@ -928,7 +1075,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte if expected != actual { return Fail(t, fmt.Sprintf("Error message not equal:\n"+ "expected: %q\n"+ - "received: %q", expected, actual), msgAndArgs...) + "actual : %q", expected, actual), msgAndArgs...) } return true } @@ -951,8 +1098,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { // // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { match := matchRegexp(rx, str) @@ -968,8 +1113,6 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { match := matchRegexp(rx, str) @@ -981,7 +1124,7 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } -// Zero asserts that i is the zero value for its type and returns the truth. +// Zero asserts that i is the zero value for its type. 
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
	if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
		return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
@@ -989,7 +1132,7 @@ func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
	return true
}

-// NotZero asserts that i is not the zero value for its type and returns the truth.
+// NotZero asserts that i is not the zero value for its type.
func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
	if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
		return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
@@ -997,11 +1140,39 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
	return true
}

+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+	}
+	return true
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if !info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+	}
+	return true
+}
+
// JSONEq asserts that two JSON strings are equivalent.
//
//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-//
-// Returns whether the assertion was successful (true) or not (false).
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
	var expectedJSONAsInterface, actualJSONAsInterface interface{}

@@ -1061,6 +1232,22 @@ func diff(expected interface{}, actual interface{}) string {
	return "\n\nDiff:\n" + diff
}

+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
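The new FileExists and DirExists assertions distinguish files from directories via os.Lstat. A small illustrative test (the temp paths are invented for this sketch):

```go
package example

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

// Illustrative only: FileExists fails on directories, DirExists fails on
// plain files, and both fail when the path cannot be stat'ed.
func TestExistenceAssertions(t *testing.T) {
	dir, err := ioutil.TempDir("", "assert-example")
	assert.NoError(t, err)
	defer os.RemoveAll(dir)

	file := filepath.Join(dir, "data.txt")
	assert.NoError(t, ioutil.WriteFile(file, []byte("hi"), 0644))

	assert.FileExists(t, file) // a regular file
	assert.DirExists(t, dir)   // a directory
}
```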
+func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + var spewConfig = spew.ConfigState{ Indent: " ", DisablePointerAddresses: true, diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go index b867e95ea..9ad56851d 100644 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -13,4 +13,4 @@ func New(t TestingT) *Assertions { } } -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index fa7ab89b1..3101e78dd 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -8,16 +8,16 @@ import ( "strings" ) -// httpCode is a helper that returns HTTP code of the response. It returns -1 -// if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) if err != nil { - return -1 + return -1, err } handler(w, req) - return w.Code + return w.Code, nil } // HTTPSuccess asserts that a specified handler returns a success status code. @@ -25,12 +25,19 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) i // assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) return false } - return code >= http.StatusOK && code <= http.StatusPartialContent + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode } // HTTPRedirect asserts that a specified handler returns a redirect status code. @@ -38,12 +45,19 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value // assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
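The validateEqualArgs/isFunction pair defined above guards Equal and NotEqual against func arguments, which Go cannot meaningfully compare. A minimal sketch of the resulting behavior (mockT is invented here purely to capture the failure so the example test itself passes):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// mockT records whether an assertion reported a failure.
type mockT struct{ failed bool }

func (m *mockT) Errorf(format string, args ...interface{}) { m.failed = true }

// Illustrative only: NotEqual (and Equal) now call validateEqualArgs and
// refuse func arguments instead of silently comparing them.
func TestFuncArgsAreRejected(t *testing.T) {
	f := func() {}

	m := &mockT{}
	assert.NotEqual(m, f, f) // reports "Invalid operation: ... cannot take func type as argument"
	assert.True(t, m.failed)
}
```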
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) return false } - return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode } // HTTPError asserts that a specified handler returns an error status code. @@ -51,12 +65,19 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu // assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) return false } - return code >= http.StatusBadRequest + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode } // HTTPBody is a helper that returns HTTP body of the response. It returns @@ -77,7 +98,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s // assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { body := HTTPBody(handler, method, url, values) contains := strings.Contains(body, fmt.Sprint(str)) @@ -94,7 +115,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, // assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
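With the reworked httpCode helper, the HTTP assertions now fail loudly with the received status code instead of silently returning false, and they accept trailing msgAndArgs like the rest of the package. An illustrative use (the handlers are invented for this sketch):

```go
package example

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// Illustrative only: HTTPSuccess expects a 2xx response, HTTPError
// expects >= 400; both drive the handler through httptest internally.
func TestHTTPAssertions(t *testing.T) {
	okHandler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}
	notFoundHandler := func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r)
	}

	assert.HTTPSuccess(t, okHandler, "GET", "/health", nil, "expected 2xx from /health")
	assert.HTTPError(t, notFoundHandler, "GET", "/missing", nil)
}
```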
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { body := HTTPBody(handler, method, url, values) contains := strings.Contains(body, fmt.Sprint(str)) diff --git a/vendor/github.com/stretchr/testify/doc.go b/vendor/github.com/stretchr/testify/doc.go deleted file mode 100644 index 377d5cc56..000000000 --- a/vendor/github.com/stretchr/testify/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend. -// -// testify contains the following packages: -// -// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system. -// -// The http package contains tools to make it easier to test http activity using the Go testing system. -// -// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected. -// -// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces. -package testify - -// blank imports help docs. -import ( - // assert package - _ "github.com/stretchr/testify/assert" - // http package - _ "github.com/stretchr/testify/http" - // mock package - _ "github.com/stretchr/testify/mock" -) diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index 744684702..1e232b56e 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -1,6 +1,7 @@ package mock import ( + "errors" "fmt" "reflect" "regexp" @@ -48,10 +49,15 @@ type Call struct { // Amount of times this call has been called totalCalls int + // Call to this method can be optional + optional bool + // Holds a channel that will be used to block the Return until it either // receives a message or is closed. nil means it returns immediately. WaitFor <-chan time.Time + waitTime time.Duration + // Holds a handler used to manipulate arguments content that are passed by // reference. It's useful when mocking methods such as unmarshalers or // decoders. @@ -130,7 +136,10 @@ func (c *Call) WaitUntil(w <-chan time.Time) *Call { // // Mock.On("MyMethod", arg1, arg2).After(time.Second) func (c *Call) After(d time.Duration) *Call { - return c.WaitUntil(time.After(d)) + c.lock() + defer c.unlock() + c.waitTime = d + return c } // Run sets a handler to be called before returning. It can be used when @@ -141,13 +150,22 @@ func (c *Call) After(d time.Duration) *Call { // arg := args.Get(0).(*map[string]interface{}) // arg["foo"] = "bar" // }) -func (c *Call) Run(fn func(Arguments)) *Call { +func (c *Call) Run(fn func(args Arguments)) *Call { c.lock() defer c.unlock() c.RunFn = fn return c } +// Maybe allows the method call to be optional. Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + // On chains a new expectation description onto the mocked interface. This // allows syntax like. 
// @@ -214,8 +232,6 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call { // */ func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - m.mutex.Lock() - defer m.mutex.Unlock() for i, call := range m.ExpectedCalls { if call.Method == method && call.Repeatability > -1 { @@ -287,8 +303,16 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { } parts := strings.Split(functionPath, ".") functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} - found, call := m.findExpectedCall(functionName, arguments...) +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { // we have to fail here - because we don't know what to do @@ -298,45 +322,47 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // b) the arguments are not what was expected, or // c) the developer has forgotten to add an accompanying On...Return pair. - closestFound, closestCall := m.findClosestCall(functionName, arguments...) + closestFound, closestCall := m.findClosestCall(methodName, arguments...) + m.mutex.Unlock() if closestFound { - panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true), diffArguments(arguments, closestCall.Arguments))) + panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(methodName, arguments, true), callString(methodName, closestCall.Arguments, true), diffArguments(closestCall.Arguments, arguments))) } else { - panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) + panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo())) } - } else { - m.mutex.Lock() - switch { - case call.Repeatability == 1: - call.Repeatability = -1 - call.totalCalls++ - - case call.Repeatability > 1: - call.Repeatability-- - call.totalCalls++ - - case call.Repeatability == 0: - call.totalCalls++ - } - m.mutex.Unlock() } + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + // add the call - m.mutex.Lock() - m.Calls = append(m.Calls, *newCall(m, functionName, arguments...)) + m.Calls = append(m.Calls, *newCall(m, methodName, arguments...)) m.mutex.Unlock() // block if specified if call.WaitFor != nil { <-call.WaitFor + } else { + time.Sleep(call.waitTime) } - if call.RunFn != nil { - call.RunFn(arguments) + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) } - return call.ReturnArguments + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs } /* @@ -368,25 +394,26 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { // AssertExpectations asserts that everything specified with On and Return was // in fact called as expected. Calls may have occurred in any order. func (m *Mock) AssertExpectations(t TestingT) bool { + m.mutex.Lock() + defer m.mutex.Unlock() var somethingMissing bool var failedExpectations int // iterate through each expectation expectedCalls := m.expectedCalls() for _, expectedCall := range expectedCalls { - if !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { somethingMissing = true failedExpectations++ - t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + t.Logf("FAIL:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) } else { - m.mutex.Lock() if expectedCall.Repeatability > 0 { somethingMissing = true failedExpectations++ + t.Logf("FAIL:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) } else { - t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) } - m.mutex.Unlock() } } @@ -399,6 +426,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool { // AssertNumberOfCalls asserts that the method was called expectedCalls times. func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + m.mutex.Lock() + defer m.mutex.Unlock() var actualCalls int for _, call := range m.calls() { if call.Method == methodName { @@ -411,6 +440,8 @@ func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls // AssertCalled asserts that the method was called. // It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
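The new Call.Maybe() pairs with the optional check in AssertExpectations above: an expectation marked Maybe() may go uncalled without failing the test. A hedged sketch (the Cache type is invented for illustration):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// Cache is a hypothetical mock used only for this sketch.
type Cache struct{ mock.Mock }

func (c *Cache) Get(key string) string {
	args := c.Called(key)
	return args.String(0)
}

// Illustrative only: without Maybe(), AssertExpectations would fail
// because the expected Get call never happens.
func TestOptionalExpectation(t *testing.T) {
	c := &Cache{}
	c.On("Get", "missing").Return("").Maybe()

	// Get is intentionally never invoked.
	c.AssertExpectations(t) // passes thanks to Maybe()
}
```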
func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + m.mutex.Lock() + defer m.mutex.Unlock() if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { t.Logf("%v", m.expectedCalls()) return false @@ -421,6 +452,8 @@ func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interfac // AssertNotCalled asserts that the method was not called. // It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + m.mutex.Lock() + defer m.mutex.Unlock() if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { t.Logf("%v", m.expectedCalls()) return false @@ -446,14 +479,10 @@ func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { } func (m *Mock) expectedCalls() []*Call { - m.mutex.Lock() - defer m.mutex.Unlock() return append([]*Call{}, m.ExpectedCalls...) } func (m *Mock) calls() []Call { - m.mutex.Lock() - defer m.mutex.Unlock() return append([]Call{}, m.Calls...) } @@ -492,9 +521,25 @@ type argumentMatcher struct { func (f argumentMatcher) Matches(argument interface{}) bool { expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } - if reflect.TypeOf(argument).AssignableTo(expectType) { - result := f.fn.Call([]reflect.Value{reflect.ValueOf(argument)}) + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) return result[0].Bool() } return false @@ -514,7 +559,7 @@ func (f argumentMatcher) String() string { // // |fn|, must be a function accepting a single argument (of the expected type) // which returns a bool. If |fn| doesn't match the required signature, -// MathedBy() panics. +// MatchedBy() panics. 
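MatchedBy, hardened above to tolerate nil arguments for nilable parameter types, builds an argument matcher from a predicate function. An illustrative sketch (the Notifier type is invented for this example):

```go
package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/mock"
)

// Notifier is a hypothetical mock used only for this sketch.
type Notifier struct{ mock.Mock }

func (n *Notifier) Send(msg *string) error {
	return n.Called(msg).Error(0)
}

// Illustrative only: the expectation matches any *string whose contents
// start with "alert:"; a nil *string would now reach the predicate too.
func TestMatchedBy(t *testing.T) {
	n := &Notifier{}
	n.On("Send", mock.MatchedBy(func(msg *string) bool {
		return msg != nil && strings.HasPrefix(*msg, "alert:")
	})).Return(nil)

	text := "alert: disk full"
	n.Send(&text)
	n.AssertExpectations(t)
}
```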
func MatchedBy(fn interface{}) argumentMatcher {
	fnType := reflect.TypeOf(fn)

@@ -580,10 +625,10 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {

		if matcher, ok := expected.(argumentMatcher); ok {
			if matcher.Matches(actual) {
-				output = fmt.Sprintf("%s\t%d: \u2705 %s matched by %s\n", output, i, actual, matcher)
+				output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actual, matcher)
			} else {
				differences++
-				output = fmt.Sprintf("%s\t%d: \u2705 %s not matched by %s\n", output, i, actual, matcher)
+				output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actual, matcher)
			}
		} else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {

@@ -591,7 +636,7 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
			if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
				// not match
				differences++
-				output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual)
+				output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual)
			}

		} else {

@@ -600,11 +645,11 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
			if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
				// match
-				output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected)
+				output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actual, expected)
			} else {
				// not match
				differences++
-				output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected)
+				output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actual, expected)
			}
		}

@@ -715,6 +760,10 @@ func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
}

func diffArguments(expected Arguments, actual Arguments) string {
+	if len(expected) != len(actual) {
+		return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual))
+	}
+
	for x := range expected {
		if diffString := diff(expected[x], actual[x]); diffString != "" {
			return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString)
diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
index d3c2ab9bc..ac71d4058 100644
--- a/vendor/github.com/stretchr/testify/require/forward_requirements.go
+++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
	}
}

-//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl
+//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index fc567f140..ac3c30878 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -19,40 +19,100 @@ func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
	}
}

+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) {
+	if !assert.Conditionf(t, comp, msg, args...)
{
+		t.FailNow()
+	}
+}
+
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
-// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
	if !assert.Contains(t, s, contains, msgAndArgs...) {
		t.FailNow()
	}
}

+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
+	if !assert.Containsf(t, s, contains, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) {
+	if !assert.DirExists(t, path, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) {
+	if !assert.DirExistsf(t, path, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+	if !assert.ElementsMatch(t, listA, listB, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) {
+	if !assert.ElementsMatchf(t, listA, listB, msg, args...) {
+		t.FailNow()
+	}
+}
+
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// assert.Empty(t, obj)
-//
-// Returns whether the assertion was successful (true) or not (false).
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
	if !assert.Empty(t, object, msgAndArgs...) {
		t.FailNow()
	}
}

+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
+	if !assert.Emptyf(t, object, msg, args...) {
+		t.FailNow()
+	}
+}
+
// Equal asserts that two objects are equal.
//
-// assert.Equal(t, 123, 123, "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.Equal(t, 123, 123)
//
// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
	if !assert.Equal(t, expected, actual, msgAndArgs...) {
		t.FailNow()
@@ -63,52 +123,99 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString, "An error was expected")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.EqualError(t, err, expectedErrorString)
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
	if !assert.EqualError(t, theError, errString, msgAndArgs...) {
		t.FailNow()
	}
}

+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) {
+	if !assert.EqualErrorf(t, theError, errString, msg, args...) {
+		t.FailNow()
+	}
+}
+
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
-// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.EqualValues(t, uint32(123), int32(123))
func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
	if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
		t.FailNow()
	}
}

+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if !assert.EqualValuesf(t, expected, actual, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if !assert.Equalf(t, expected, actual, msg, args...) {
+		t.FailNow()
+	}
+}
+
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-//	   assert.Equal(t, err, expectedError)
+// if assert.Error(t, err) {
+//	   assert.Equal(t, expectedError, err)
// }
-//
-// Returns whether the assertion was successful (true) or not (false).
func Error(t TestingT, err error, msgAndArgs ...interface{}) {
	if !assert.Error(t, err, msgAndArgs...) {
		t.FailNow()
	}
}

-// Exactly asserts that two objects are equal is value and type.
+// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+//	   assert.Equal(t, expectedError, err)
+// }
+func Errorf(t TestingT, err error, msg string, args ...interface{}) {
+	if !assert.Errorf(t, err, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// Exactly asserts that two objects are equal in value and type.
//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.Exactly(t, int32(123), int64(123))
func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
	if !assert.Exactly(t, expected, actual, msgAndArgs...) {
		t.FailNow()
	}
}

+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if !assert.Exactlyf(t, expected, actual, msg, args...) {
+		t.FailNow()
+	}
+}
+
// Fail reports a failure through
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
	if !assert.Fail(t, failureMessage, msgAndArgs...) {
@@ -123,25 +230,72 @@ func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
	}
}

+// FailNowf fails test
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) {
+	if !assert.FailNowf(t, failureMessage, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// Failf reports a failure through
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) {
+	if !assert.Failf(t, failureMessage, msg, args...) {
+		t.FailNow()
+	}
+}
+
// False asserts that the specified value is false.
//
-// assert.False(t, myBool, "myBool should be false")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) {
	if !assert.False(t, value, msgAndArgs...) {
		t.FailNow()
	}
}

+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) {
+	if !assert.Falsef(t, value, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) {
+	if !assert.FileExists(t, path, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) {
+	if !assert.FileExistsf(t, path, msg, args...) {
+		t.FailNow()
+	}
+}
+
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
//  assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
-	if !assert.HTTPBodyContains(t, handler, method, url, values, str) {
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+	if !assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+	if !assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) {
		t.FailNow()
	}
}
@@ -152,8 +306,20 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
-	if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) {
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+	if !assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+	if !assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) {
+		t.FailNow()
+	}
+}
@@ -163,8 +329,19 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
-	if !assert.HTTPError(t, handler, method, url, values) {
+func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	if !assert.HTTPError(t, handler, method, url, values, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
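All of these require wrappers share one behavior worth spelling out: unlike assert, a failed require call aborts the test immediately via t.FailNow(). A hedged usage sketch of the new ...f variants (the config map is invented for illustration):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Illustrative only: every require assertion now has an ...f variant that
// takes a format string plus args instead of msgAndArgs.
func TestRequireFormatVariants(t *testing.T) {
	cfg := map[string]string{"entrypoint": ":80"}

	require.NotEmptyf(t, cfg, "config for %s must not be empty", "traefik")
	require.Falsef(t, len(cfg) == 0, "expected at least one key, got %d", len(cfg))

	// If either line above had failed, execution would have stopped there:
	// the require wrappers call t.FailNow() instead of continuing.
}
```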
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	if !assert.HTTPErrorf(t, handler, method, url, values, msg, args...) {
+		t.FailNow()
+	}
+}
@@ -174,8 +351,19 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, 
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
-	if !assert.HTTPRedirect(t, handler, method, url, values) {
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	if !assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	if !assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) {
+		t.FailNow()
+	}
+}
@@ -185,32 +373,64 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
-	if !assert.HTTPSuccess(t, handler, method, url, values) {
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	if !assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	if !assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) {
+		t.FailNow()
+	}
+}

// Implements asserts that an object implements the specified interface.
//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
	if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
		t.FailNow()
	}
}

+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+	if !assert.Implementsf(t, interfaceObject, object, msg, args...) {
+		t.FailNow()
+	}
+}
+
// InDelta asserts that the two numerals are within delta of each other.
//
//  assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
-//
-// Returns whether the assertion was successful (true) or not (false).
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
	if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
		t.FailNow()
	}
}

+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	if !assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) {
+		t.FailNow()
+	}
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+	if !assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) {
+		t.FailNow()
+	}
+}
+
// InDeltaSlice is the same as InDelta, except it compares two slices.
func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
	if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
@@ -218,9 +438,23 @@ func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta fl
	}
}

-// InEpsilon asserts that expected and actual have a relative error less than epsilon
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+	if !assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+	if !assert.InDeltaf(t, expected, actual, delta, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
	if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
		t.FailNow()
@@ -234,6 +468,20 @@ func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilo
	}
}

+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+	if !assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) {
+		t.FailNow()
+	}
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+	if !assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) {
+		t.FailNow()
+	}
+}
+
// IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
	if !assert.IsType(t, expectedType, object, msgAndArgs...) {
@@ -241,87 +489,144 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
	}
}

+// IsTypef asserts that the specified objects are of the same type.
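Every function in this file is a thin generated wrapper over its assert counterpart; the same two-line pattern works for custom assertions as well. A hedged sketch under that assumption (CheckPositive and RequirePositive are invented for illustration, not part of the library):

```go
package example

import (
	"fmt"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// CheckPositive follows the shape of the vendored assert functions:
// report via Fail and return false on failure, true otherwise.
func CheckPositive(t assert.TestingT, n int, msgAndArgs ...interface{}) bool {
	if n <= 0 {
		return assert.Fail(t, fmt.Sprintf("expected a positive number, got %d", n), msgAndArgs...)
	}
	return true
}

// RequirePositive mirrors the generated require wrappers: delegate to the
// assert-style function and abort the test with FailNow on failure.
func RequirePositive(t require.TestingT, n int, msgAndArgs ...interface{}) {
	if !CheckPositive(t, n, msgAndArgs...) {
		t.FailNow()
	}
}
```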
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) {
+	if !assert.IsTypef(t, expectedType, object, msg, args...) {
+		t.FailNow()
+	}
+}
+
// JSONEq asserts that two JSON strings are equivalent.
//
//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-//
-// Returns whether the assertion was successful (true) or not (false).
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
	if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
		t.FailNow()
	}
}

+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
+	if !assert.JSONEqf(t, expected, actual, msg, args...) {
+		t.FailNow()
+	}
+}
+
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
-// assert.Len(t, mySlice, 3, "The size of slice is not 3")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.Len(t, mySlice, 3)
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
	if !assert.Len(t, object, length, msgAndArgs...) {
		t.FailNow()
	}
}

+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) {
+	if !assert.Lenf(t, object, length, msg, args...) {
+		t.FailNow()
+	}
+}
+
// Nil asserts that the specified object is nil.
//
-// assert.Nil(t, err, "err should be nothing")
-//
-// Returns whether the assertion was successful (true) or not (false).
+// assert.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
	if !assert.Nil(t, object, msgAndArgs...) {
		t.FailNow()
	}
}

+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
+	if !assert.Nilf(t, object, msg, args...) {
+		t.FailNow()
+	}
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if assert.NoError(t, err) {
-//	   assert.Equal(t, actualObj, expectedObj)
+//	   assert.Equal(t, expectedObj, actualObj)
// }
-//
-// Returns whether the assertion was successful (true) or not (false).
func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
	if !assert.NoError(t, err, msgAndArgs...) {
		t.FailNow()
	}
}

+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+//	   assert.Equal(t, expectedObj, actualObj)
+// }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
+	if !assert.NoErrorf(t, err, msg, args...) {
+		t.FailNow()
+	}
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
// -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if !assert.NotContains(t, s, contains, msgAndArgs...) { t.FailNow() } } +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if !assert.NotContainsf(t, s, contains, msg, args...) { + t.FailNow() + } +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) // } -// -// Returns whether the assertion was successful (true) or not (false). func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if !assert.NotEmpty(t, object, msgAndArgs...) { t.FailNow() } } +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.NotEmptyf(t, object, msg, args...) { + t.FailNow() + } +} + // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -331,99 +636,232 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . } } +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if !assert.NotEqualf(t, expected, actual, msg, args...) { + t.FailNow() + } +} + // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if !assert.NotNil(t, object, msgAndArgs...) { t.FailNow() } } +// NotNilf asserts that the specified object is not nil. 
+// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { + if !assert.NotNilf(t, object, msg, args...) { + t.FailNow() + } +} + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if !assert.NotPanics(t, f, msgAndArgs...) { t.FailNow() } } +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.NotPanicsf(t, f, msg, args...) { + t.FailNow() + } +} + // NotRegexp asserts that a specified regexp does not match a string. // // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if !assert.NotRegexp(t, rx, str, msgAndArgs...) { t.FailNow() } } -// NotZero asserts that i is not the zero value for its type and returns the truth. +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if !assert.NotRegexpf(t, rx, str, msg, args...) { + t.FailNow() + } +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if !assert.NotSubset(t, list, subset, msgAndArgs...) { + t.FailNow() + } +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if !assert.NotSubsetf(t, list, subset, msg, args...) { + t.FailNow() + } +} + +// NotZero asserts that i is not the zero value for its type. func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { if !assert.NotZero(t, i, msgAndArgs...) { t.FailNow() } } +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { + if !assert.NotZerof(t, i, msg, args...) { + t.FailNow() + } +} + // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). 
+// assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if !assert.Panics(t, f, msgAndArgs...) { t.FailNow() } } +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.PanicsWithValue(t, expected, f, msgAndArgs...) { + t.FailNow() + } +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.PanicsWithValuef(t, expected, f, msg, args...) { + t.FailNow() + } +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { + if !assert.Panicsf(t, f, msg, args...) { + t.FailNow() + } +} + // Regexp asserts that a specified regexp matches a string. // // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if !assert.Regexp(t, rx, str, msgAndArgs...) { t.FailNow() } } +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if !assert.Regexpf(t, rx, str, msg, args...) { + t.FailNow() + } +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if !assert.Subset(t, list, subset, msgAndArgs...) { + t.FailNow() + } +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { + if !assert.Subsetf(t, list, subset, msg, args...) { + t.FailNow() + } +} + // True asserts that the specified value is true. // -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if !assert.True(t, value, msgAndArgs...) { t.FailNow() } } +// Truef asserts that the specified value is true. 
+// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) { + if !assert.Truef(t, value, msg, args...) { + t.FailNow() + } +} + // WithinDuration asserts that the two times are within duration delta of each other. // -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { t.FailNow() } } -// Zero asserts that i is the zero value for its type and returns the truth. +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if !assert.WithinDurationf(t, expected, actual, delta, msg, args...) { + t.FailNow() + } +} + +// Zero asserts that i is the zero value for its type. func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { if !assert.Zero(t, i, msgAndArgs...) { t.FailNow() } } + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) { + if !assert.Zerof(t, i, msg, args...) { + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index caa18793d..299ceb95a 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -17,36 +17,82 @@ func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{} Condition(a.t, comp, msgAndArgs...) } +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) { + Conditionf(a.t, comp, msg, args...) +} + // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { Contains(a.t, s, contains, msgAndArgs...) } +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + Containsf(a.t, s, contains, msg, args...) 
+} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { + DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { + DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + ElementsMatchf(a.t, listA, listB, msg, args...) +} + // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // // a.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { + Emptyf(a.t, object, msg, args...) +} + // Equal asserts that two objects are equal. // -// a.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Equal(123, 123) // // Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { Equal(a.t, expected, actual, msgAndArgs...) } @@ -55,44 +101,81 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString, "An error was expected") -// -// Returns whether the assertion was successful (true) or not (false). +// a.EqualError(err, expectedErrorString) func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { EqualError(a.t, theError, errString, msgAndArgs...) } +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
+// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) { + EqualErrorf(a.t, theError, errString, msg, args...) +} + // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { EqualValues(a.t, expected, actual, msgAndArgs...) } +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + Equalf(a.t, expected, actual, msg, args...) +} + // Error asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if a.Error(err, "An error was expected") { -// assert.Equal(t, err, expectedError) +// if a.Error(err) { +// assert.Equal(t, expectedError, err) // } -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { Error(a.t, err, msgAndArgs...) } -// Exactly asserts that two objects are equal is value and type. +// Errorf asserts that a function returned an error (i.e. not `nil`). // -// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + Errorf(a.t, err, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. // -// Returns whether the assertion was successful (true) or not (false). +// a.Exactly(int32(123), int64(123)) func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { Exactly(a.t, expected, actual, msgAndArgs...) } +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + Exactlyf(a.t, expected, actual, msg, args...) +} + // Fail reports a failure through func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { Fail(a.t, failureMessage, msgAndArgs...) @@ -103,23 +186,58 @@ func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { FailNow(a.t, failureMessage, msgAndArgs...) } +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) { + FailNowf(a.t, failureMessage, msg, args...) 
+} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) { + Failf(a.t, failureMessage, msg, args...) +} + // False asserts that the specified value is false. // -// a.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). +// a.False(myBool) func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { False(a.t, value, msgAndArgs...) } +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { + Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { + FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { + FileExistsf(a.t, path, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // // a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { - HTTPBodyContains(a.t, handler, method, url, values, str) +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPBodyNotContains asserts that a specified handler returns a @@ -128,8 +246,18 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u // a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { - HTTPBodyNotContains(a.t, handler, method, url, values, str) +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). 
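The hunk above also introduces filesystem assertions (`FileExists`, `DirExists`). A short sketch, assuming a Unix-like machine where `/etc/hosts` exists; the test name is hypothetical.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestPaths (hypothetical) uses the new filesystem helpers.
func TestPaths(t *testing.T) {
	// Fails if /etc is missing, or is a file rather than a directory.
	require.DirExists(t, "/etc")

	// Fails if the path is missing or points to a directory. The trailing
	// arguments are the usual optional failure message.
	require.FileExists(t, "/etc/hosts", "expected a hosts file on this machine")
}
```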
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+	HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
 }
 
 // HTTPError asserts that a specified handler returns an error status code.
@@ -137,8 +265,17 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
 // a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) {
-	HTTPError(a.t, handler, method, url, values)
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	HTTPErrorf(a.t, handler, method, url, values, msg, args...)
 }
 
 // HTTPRedirect asserts that a specified handler returns a redirect status code.
@@ -146,8 +283,17 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
 // a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) {
-	HTTPRedirect(a.t, handler, method, url, values)
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
 }
 
 // HTTPSuccess asserts that a specified handler returns a success status code.
@@ -155,34 +301,68 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
 // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
 //
 // Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) {
-	HTTPSuccess(a.t, handler, method, url, values)
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
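The substance of the hunks above is that the HTTP helpers now accept optional `msgAndArgs`. A sketch of the method-style calls, using a hypothetical in-process handler:

```go
package example

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/require"
)

// healthHandler (hypothetical) stands in for the handler under test.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "ok")
}

func TestHealthEndpoint(t *testing.T) {
	a := require.New(t)

	// Both calls now take optional message arguments, per the new signatures.
	a.HTTPSuccess(healthHandler, "GET", "/health", nil, "health endpoint must return 2xx")
	a.HTTPBodyContains(healthHandler, "GET", "/health", url.Values{}, "ok")
}
```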
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + HTTPSuccessf(a.t, handler, method, url, values, msg, args...) } // Implements asserts that an object is implemented by the specified interface. // -// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +// a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { Implements(a.t, interfaceObject, object, msgAndArgs...) } +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + Implementsf(a.t, interfaceObject, object, msg, args...) +} + // InDelta asserts that the two numerals are within delta of each other. // // a.InDelta(math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { InDelta(a.t, expected, actual, delta, msgAndArgs...) } +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + // InDeltaSlice is the same as InDelta, except it compares two slices. func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) } -// InEpsilon asserts that expected and actual have a relative error less than epsilon +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. // -// Returns whether the assertion was successful (true) or not (false). +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + InDeltaf(a.t, expected, actual, delta, msg, args...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) } @@ -192,80 +372,133 @@ func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, ep InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) } +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. 
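Since `InEpsilon` above is documented only as a relative-error check, a sketch contrasting it with the absolute `InDelta`; the values are illustrative and the relative-error reading is an assumption consistent with the doc comments.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorBounds(t *testing.T) {
	// InDelta bounds the absolute error: |100 - 101| = 1 <= 1.5.
	require.InDelta(t, 100.0, 101.0, 1.5)

	// InEpsilon bounds the relative error: |100 - 101| / |100| = 0.01 <= 0.02.
	require.InEpsilon(t, 100.0, 101.0, 0.02)
}
```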
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { IsType(a.t, expectedType, object, msgAndArgs...) } +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { + IsTypef(a.t, expectedType, object, msg, args...) +} + // JSONEq asserts that two JSON strings are equivalent. // // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { JSONEq(a.t, expected, actual, msgAndArgs...) } +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) { + JSONEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// a.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Len(mySlice, 3) func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { Len(a.t, object, length, msgAndArgs...) } +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) { + Lenf(a.t, object, length, msg, args...) +} + // Nil asserts that the specified object is nil. // -// a.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Nil(err) func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { Nil(a.t, object, msgAndArgs...) } +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { + Nilf(a.t, object, msg, args...) +} + // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() // if a.NoError(err) { -// assert.Equal(t, actualObj, expectedObj) +// assert.Equal(t, expectedObj, actualObj) // } -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { NoError(a.t, err, msgAndArgs...) } +// NoErrorf asserts that a function returned no error (i.e. `nil`). 
+// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { + NoErrorf(a.t, err, msg, args...) +} + // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { NotContains(a.t, s, contains, msgAndArgs...) } +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + NotContainsf(a.t, s, contains, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) // } -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) { + NotEmptyf(a.t, object, msg, args...) +} + // NotEqual asserts that the specified values are NOT equal. // -// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). +// a.NotEqual(obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -273,81 +506,182 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr NotEqual(a.t, expected, actual, msgAndArgs...) } +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + NotEqualf(a.t, expected, actual, msg, args...) +} + // NotNil asserts that the specified object is not nil. // -// a.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). 
+// a.NotNil(err) func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { NotNil(a.t, object, msgAndArgs...) } +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) { + NotNilf(a.t, object, msg, args...) +} + // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// a.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). +// a.NotPanics(func(){ RemainCalm() }) func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { NotPanics(a.t, f, msgAndArgs...) } +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + NotPanicsf(a.t, f, msg, args...) +} + // NotRegexp asserts that a specified regexp does not match a string. // // a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp("^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { NotRegexp(a.t, rx, str, msgAndArgs...) } -// NotZero asserts that i is not the zero value for its type and returns the truth. +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { NotZero(a.t, i, msgAndArgs...) } +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { + NotZerof(a.t, i, msg, args...) +} + // Panics asserts that the code inside the specified PanicTestFunc panics. // -// a.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). +// a.Panics(func(){ GoCrazy() }) func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { Panics(a.t, f, msgAndArgs...) 
} +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + Panicsf(a.t, f, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. // // a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp("start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { Regexp(a.t, rx, str, msgAndArgs...) } +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + Subsetf(a.t, list, subset, msg, args...) +} + // True asserts that the specified value is true. // -// a.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). +// a.True(myBool) func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { True(a.t, value, msgAndArgs...) } +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + Truef(a.t, value, msg, args...) +} + // WithinDuration asserts that the two times are within duration delta of each other. // -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). 
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { WithinDuration(a.t, expected, actual, delta, msgAndArgs...) } -// Zero asserts that i is the zero value for its type and returns the truth. +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { Zero(a.t, i, msgAndArgs...) } + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) { + Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 41147562d..e404f016d 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,4 +6,4 @@ type TestingT interface { FailNow() } -//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE new file mode 100644 index 000000000..c28adbadd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
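To tie the generated testify files together: `requirements.go` defines the tiny `TestingT` contract, `require.go` holds the package-level functions, and `require_forward.go` wraps them behind an `*Assertions` value. A sketch of that method-style API, covering assertions new to this vendored version; the test itself is hypothetical.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestForwardAPI(t *testing.T) {
	// require.New stores t once; every method then forwards to the
	// package-level function with that stored TestingT.
	a := require.New(t)

	// ElementsMatch: order-insensitive comparison, duplicates counted.
	a.ElementsMatch([]int{1, 3, 2, 3}, []int{1, 3, 3, 2})

	// Subset and PanicsWithValue are also new in this testify bump.
	a.Subset([]int{1, 2, 3}, []int{1, 2})
	a.PanicsWithValue("crazy error", func() { panic("crazy error") })
}
```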
diff --git a/vendor/github.com/xenolf/lego/account.go b/vendor/github.com/xenolf/lego/account.go deleted file mode 100644 index 34856e16f..000000000 --- a/vendor/github.com/xenolf/lego/account.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "crypto" - "encoding/json" - "io/ioutil" - "os" - "path" - - "github.com/xenolf/lego/acme" -) - -// Account represents a users local saved credentials -type Account struct { - Email string `json:"email"` - key crypto.PrivateKey - Registration *acme.RegistrationResource `json:"registration"` - - conf *Configuration -} - -// NewAccount creates a new account for an email address -func NewAccount(email string, conf *Configuration) *Account { - accKeysPath := conf.AccountKeysPath(email) - // TODO: move to function in configuration? - accKeyPath := accKeysPath + string(os.PathSeparator) + email + ".key" - if err := checkFolder(accKeysPath); err != nil { - logger().Fatalf("Could not check/create directory for account %s: %v", email, err) - } - - var privKey crypto.PrivateKey - if _, err := os.Stat(accKeyPath); os.IsNotExist(err) { - - logger().Printf("No key found for account %s. Generating a curve P384 EC key.", email) - privKey, err = generatePrivateKey(accKeyPath) - if err != nil { - logger().Fatalf("Could not generate RSA private account key for account %s: %v", email, err) - } - - logger().Printf("Saved key to %s", accKeyPath) - } else { - privKey, err = loadPrivateKey(accKeyPath) - if err != nil { - logger().Fatalf("Could not load RSA private key from file %s: %v", accKeyPath, err) - } - } - - accountFile := path.Join(conf.AccountPath(email), "account.json") - if _, err := os.Stat(accountFile); os.IsNotExist(err) { - return &Account{Email: email, key: privKey, conf: conf} - } - - fileBytes, err := ioutil.ReadFile(accountFile) - if err != nil { - logger().Fatalf("Could not load file for account %s -> %v", email, err) - } - - var acc Account - err = json.Unmarshal(fileBytes, &acc) - if err != nil { - logger().Fatalf("Could not parse file for account %s -> %v", email, err) - } - - acc.key = privKey - acc.conf = conf - - if acc.Registration == nil { - logger().Fatalf("Could not load account for %s. Registration is nil.", email) - } - - if acc.conf == nil { - logger().Fatalf("Could not load account for %s. Configuration is nil.", email) - } - - return &acc -} - -/** Implementation of the acme.User interface **/ - -// GetEmail returns the email address for the account -func (a *Account) GetEmail() string { - return a.Email -} - -// GetPrivateKey returns the private RSA account key. -func (a *Account) GetPrivateKey() crypto.PrivateKey { - return a.key -} - -// GetRegistration returns the server registration -func (a *Account) GetRegistration() *acme.RegistrationResource { - return a.Registration -} - -/** End **/ - -// Save the account to disk -func (a *Account) Save() error { - jsonBytes, err := json.MarshalIndent(a, "", "\t") - if err != nil { - return err - } - - return ioutil.WriteFile( - path.Join(a.conf.AccountPath(a.Email), "account.json"), - jsonBytes, - 0600, - ) -} diff --git a/vendor/github.com/xenolf/lego/cli.go b/vendor/github.com/xenolf/lego/cli.go deleted file mode 100644 index 3aac9e253..000000000 --- a/vendor/github.com/xenolf/lego/cli.go +++ /dev/null @@ -1,232 +0,0 @@ -// Let's Encrypt client to go! -// CLI application for generating Let's Encrypt certificates using the ACME package. 
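The deleted `account.go` above was lego's CLI-side implementation of the `acme.User` interface (its `GetEmail`/`GetPrivateKey`/`GetRegistration` methods). A minimal sketch of what any replacement storage type must provide; `MyUser` is hypothetical.

```go
package example

import (
	"crypto"

	"github.com/xenolf/lego/acme"
)

// MyUser (hypothetical) satisfies acme.User, the same contract the
// deleted Account type fulfilled: an email, a private key, and the
// registration resource returned by the ACME server.
type MyUser struct {
	Email        string
	Key          crypto.PrivateKey
	Registration *acme.RegistrationResource
}

func (u *MyUser) GetEmail() string                 { return u.Email }
func (u *MyUser) GetPrivateKey() crypto.PrivateKey { return u.Key }
func (u *MyUser) GetRegistration() *acme.RegistrationResource {
	return u.Registration
}
```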
-package main - -import ( - "fmt" - "log" - "os" - "path" - "strings" - "text/tabwriter" - - "github.com/urfave/cli" - "github.com/xenolf/lego/acme" -) - -// Logger is used to log errors; if nil, the default log.Logger is used. -var Logger *log.Logger - -// logger is an helper function to retrieve the available logger -func logger() *log.Logger { - if Logger == nil { - Logger = log.New(os.Stderr, "", log.LstdFlags) - } - return Logger -} - -var gittag string - -func main() { - app := cli.NewApp() - app.Name = "lego" - app.Usage = "Let's Encrypt client written in Go" - - version := "0.4.1" - if strings.HasPrefix(gittag, "v") { - version = gittag - } - - app.Version = version - - acme.UserAgent = "lego/" + app.Version - - defaultPath := "" - cwd, err := os.Getwd() - if err == nil { - defaultPath = path.Join(cwd, ".lego") - } - - app.Before = func(c *cli.Context) error { - if c.GlobalString("path") == "" { - logger().Fatal("Could not determine current working directory. Please pass --path.") - } - return nil - } - - app.Commands = []cli.Command{ - { - Name: "run", - Usage: "Register an account, then create and install a certificate", - Action: run, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "no-bundle", - Usage: "Do not create a certificate bundle by adding the issuers certificate to the new certificate.", - }, - cli.BoolFlag{ - Name: "must-staple", - Usage: "Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.", - }, - }, - }, - { - Name: "revoke", - Usage: "Revoke a certificate", - Action: revoke, - }, - { - Name: "renew", - Usage: "Renew a certificate", - Action: renew, - Flags: []cli.Flag{ - cli.IntFlag{ - Name: "days", - Value: 0, - Usage: "The number of days left on a certificate to renew it.", - }, - cli.BoolFlag{ - Name: "reuse-key", - Usage: "Used to indicate you want to reuse your current private key for the new certificate.", - }, - cli.BoolFlag{ - Name: "no-bundle", - Usage: "Do not create a certificate bundle by adding the issuers certificate to the new certificate.", - }, - cli.BoolFlag{ - Name: "must-staple", - Usage: "Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.", - }, - }, - }, - { - Name: "dnshelp", - Usage: "Shows additional help for the --dns global option", - Action: dnshelp, - }, - } - - app.Flags = []cli.Flag{ - cli.StringSliceFlag{ - Name: "domains, d", - Usage: "Add a domain to the process. Can be specified multiple times.", - }, - cli.StringFlag{ - Name: "csr, c", - Usage: "Certificate signing request filename, if an external CSR is to be used", - }, - cli.StringFlag{ - Name: "server, s", - Value: "https://acme-v01.api.letsencrypt.org/directory", - Usage: "CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.", - }, - cli.StringFlag{ - Name: "email, m", - Usage: "Email used for registration and recovery contact.", - }, - cli.BoolFlag{ - Name: "accept-tos, a", - Usage: "By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.", - }, - cli.StringFlag{ - Name: "key-type, k", - Value: "rsa2048", - Usage: "Key type to use for private keys. 
Supported: rsa2048, rsa4096, rsa8192, ec256, ec384", - }, - cli.StringFlag{ - Name: "path", - Usage: "Directory to use for storing the data", - Value: defaultPath, - }, - cli.StringSliceFlag{ - Name: "exclude, x", - Usage: "Explicitly disallow solvers by name from being used. Solvers: \"http-01\", \"tls-sni-01\".", - }, - cli.StringFlag{ - Name: "webroot", - Usage: "Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known/acme-challenge", - }, - cli.StringSliceFlag{ - Name: "memcached-host", - Usage: "Set the memcached host(s) to use for HTTP based challenges. Challenges will be written to all specified hosts.", - }, - cli.StringFlag{ - Name: "http", - Usage: "Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port", - }, - cli.StringFlag{ - Name: "tls", - Usage: "Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port", - }, - cli.StringFlag{ - Name: "dns", - Usage: "Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.", - }, - cli.IntFlag{ - Name: "http-timeout", - Usage: "Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds.", - }, - cli.IntFlag{ - Name: "dns-timeout", - Usage: "Set the DNS timeout value to a specific value in seconds. The default is 10 seconds.", - }, - cli.StringSliceFlag{ - Name: "dns-resolvers", - Usage: "Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use Google's DNS resolvers.", - }, - cli.BoolFlag{ - Name: "pem", - Usage: "Generate a .pem file by concatanating the .key and .crt files together.", - }, - } - - err = app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} - -func dnshelp(c *cli.Context) error { - fmt.Printf( - `Credentials for DNS providers must be passed through environment variables. 
- -Here is an example bash command using the CloudFlare DNS provider: - - $ CLOUDFLARE_EMAIL=foo@bar.com \ - CLOUDFLARE_API_KEY=b9841238feb177a84330febba8a83208921177bffe733 \ - lego --dns cloudflare --domains www.example.com --email me@bar.com run - -`) - - w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0) - fmt.Fprintln(w, "Valid providers and their associated credential environment variables:") - fmt.Fprintln(w) - fmt.Fprintln(w, "\tazure:\tAZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP") - fmt.Fprintln(w, "\tauroradns:\tAURORA_USER_ID, AURORA_KEY, AURORA_ENDPOINT") - fmt.Fprintln(w, "\tcloudflare:\tCLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY") - fmt.Fprintln(w, "\tdigitalocean:\tDO_AUTH_TOKEN") - fmt.Fprintln(w, "\tdnsimple:\tDNSIMPLE_EMAIL, DNSIMPLE_OAUTH_TOKEN") - fmt.Fprintln(w, "\tdnsmadeeasy:\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET") - fmt.Fprintln(w, "\texoscale:\tEXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT") - fmt.Fprintln(w, "\tgandi:\tGANDI_API_KEY") - fmt.Fprintln(w, "\tgcloud:\tGCE_PROJECT, GCE_SERVICE_ACCOUNT_FILE") - fmt.Fprintln(w, "\tlinode:\tLINODE_API_KEY") - fmt.Fprintln(w, "\tmanual:\tnone") - fmt.Fprintln(w, "\tnamecheap:\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY") - fmt.Fprintln(w, "\trackspace:\tRACKSPACE_USER, RACKSPACE_API_KEY") - fmt.Fprintln(w, "\trfc2136:\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\n\t\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER") - fmt.Fprintln(w, "\troute53:\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_HOSTED_ZONE_ID") - fmt.Fprintln(w, "\tdyn:\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD") - fmt.Fprintln(w, "\tvultr:\tVULTR_API_KEY") - fmt.Fprintln(w, "\tovh:\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY") - fmt.Fprintln(w, "\tpdns:\tPDNS_API_KEY, PDNS_API_URL") - fmt.Fprintln(w, "\tdnspod:\tDNSPOD_API_KEY") - fmt.Fprintln(w, "\totc:\tOTC_USER_NAME, OTC_PASSWORD, OTC_PROJECT_NAME, OTC_DOMAIN_NAME, OTC_IDENTITY_ENDPOINT") - w.Flush() - - fmt.Println(` -For a more detailed explanation of a DNS provider's credential variables, -please consult their online documentation.`) - - return nil -} diff --git a/vendor/github.com/xenolf/lego/cli_handlers.go b/vendor/github.com/xenolf/lego/cli_handlers.go deleted file mode 100644 index b8790c4b2..000000000 --- a/vendor/github.com/xenolf/lego/cli_handlers.go +++ /dev/null @@ -1,423 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "crypto/x509" - "encoding/json" - "encoding/pem" - "io/ioutil" - "net/http" - "os" - "path" - "strings" - "time" - - "github.com/urfave/cli" - "github.com/xenolf/lego/acme" - "github.com/xenolf/lego/providers/dns" - "github.com/xenolf/lego/providers/http/memcached" - "github.com/xenolf/lego/providers/http/webroot" -) - -func checkFolder(path string) error { - if _, err := os.Stat(path); os.IsNotExist(err) { - return os.MkdirAll(path, 0700) - } - return nil -} - -func setup(c *cli.Context) (*Configuration, *Account, *acme.Client) { - - if c.GlobalIsSet("http-timeout") { - acme.HTTPClient = http.Client{Timeout: time.Duration(c.GlobalInt("http-timeout")) * time.Second} - } - - if c.GlobalIsSet("dns-timeout") { - acme.DNSTimeout = time.Duration(c.GlobalInt("dns-timeout")) * time.Second - } - - if len(c.GlobalStringSlice("dns-resolvers")) > 0 { - resolvers := []string{} - for _, resolver := range c.GlobalStringSlice("dns-resolvers") { - if !strings.Contains(resolver, ":") { - resolver += ":53" - } - resolvers = append(resolvers, resolver) - } - acme.RecursiveNameservers 
= resolvers - } - - err := checkFolder(c.GlobalString("path")) - if err != nil { - logger().Fatalf("Could not check/create path: %s", err.Error()) - } - - conf := NewConfiguration(c) - if len(c.GlobalString("email")) == 0 { - logger().Fatal("You have to pass an account (email address) to the program using --email or -m") - } - - //TODO: move to account struct? Currently MUST pass email. - acc := NewAccount(c.GlobalString("email"), conf) - - keyType, err := conf.KeyType() - if err != nil { - logger().Fatal(err.Error()) - } - - client, err := acme.NewClient(c.GlobalString("server"), acc, keyType) - if err != nil { - logger().Fatalf("Could not create client: %s", err.Error()) - } - - if len(c.GlobalStringSlice("exclude")) > 0 { - client.ExcludeChallenges(conf.ExcludedSolvers()) - } - - if c.GlobalIsSet("webroot") { - provider, err := webroot.NewHTTPProvider(c.GlobalString("webroot")) - if err != nil { - logger().Fatal(err) - } - - client.SetChallengeProvider(acme.HTTP01, provider) - - // --webroot=foo indicates that the user specifically want to do a HTTP challenge - // infer that the user also wants to exclude all other challenges - client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01}) - } - if c.GlobalIsSet("memcached-host") { - provider, err := memcached.NewMemcachedProvider(c.GlobalStringSlice("memcached-host")) - if err != nil { - logger().Fatal(err) - } - - client.SetChallengeProvider(acme.HTTP01, provider) - - // --memcached-host=foo:11211 indicates that the user specifically want to do a HTTP challenge - // infer that the user also wants to exclude all other challenges - client.ExcludeChallenges([]acme.Challenge{acme.DNS01, acme.TLSSNI01}) - } - if c.GlobalIsSet("http") { - if strings.Index(c.GlobalString("http"), ":") == -1 { - logger().Fatalf("The --http switch only accepts interface:port or :port for its argument.") - } - client.SetHTTPAddress(c.GlobalString("http")) - } - - if c.GlobalIsSet("tls") { - if strings.Index(c.GlobalString("tls"), ":") == -1 { - logger().Fatalf("The --tls switch only accepts interface:port or :port for its argument.") - } - client.SetTLSAddress(c.GlobalString("tls")) - } - - if c.GlobalIsSet("dns") { - provider, err := dns.NewDNSChallengeProviderByName(c.GlobalString("dns")) - if err != nil { - logger().Fatal(err) - } - - client.SetChallengeProvider(acme.DNS01, provider) - - // --dns=foo indicates that the user specifically want to do a DNS challenge - // infer that the user also wants to exclude all other challenges - client.ExcludeChallenges([]acme.Challenge{acme.HTTP01, acme.TLSSNI01}) - } - - return conf, acc, client -} - -func saveCertRes(certRes acme.CertificateResource, conf *Configuration) { - // We store the certificate, private key and metadata in different files - // as web servers would not be able to work with a combined file. 
- certOut := path.Join(conf.CertPath(), certRes.Domain+".crt") - privOut := path.Join(conf.CertPath(), certRes.Domain+".key") - pemOut := path.Join(conf.CertPath(), certRes.Domain+".pem") - metaOut := path.Join(conf.CertPath(), certRes.Domain+".json") - issuerOut := path.Join(conf.CertPath(), certRes.Domain+".issuer.crt") - - err := ioutil.WriteFile(certOut, certRes.Certificate, 0600) - if err != nil { - logger().Fatalf("Unable to save Certificate for domain %s\n\t%s", certRes.Domain, err.Error()) - } - - if certRes.IssuerCertificate != nil { - err = ioutil.WriteFile(issuerOut, certRes.IssuerCertificate, 0600) - if err != nil { - logger().Fatalf("Unable to save IssuerCertificate for domain %s\n\t%s", certRes.Domain, err.Error()) - } - } - - if certRes.PrivateKey != nil { - // if we were given a CSR, we don't know the private key - err = ioutil.WriteFile(privOut, certRes.PrivateKey, 0600) - if err != nil { - logger().Fatalf("Unable to save PrivateKey for domain %s\n\t%s", certRes.Domain, err.Error()) - } - - if conf.context.GlobalBool("pem") { - err = ioutil.WriteFile(pemOut, bytes.Join([][]byte{certRes.Certificate, certRes.PrivateKey}, nil), 0600) - if err != nil { - logger().Fatalf("Unable to save Certificate and PrivateKey in .pem for domain %s\n\t%s", certRes.Domain, err.Error()) - } - } - - } else if conf.context.GlobalBool("pem") { - // we don't have the private key; can't write the .pem file - logger().Fatalf("Unable to save pem without private key for domain %s\n\t%s; are you using a CSR?", certRes.Domain, err.Error()) - } - - jsonBytes, err := json.MarshalIndent(certRes, "", "\t") - if err != nil { - logger().Fatalf("Unable to marshal CertResource for domain %s\n\t%s", certRes.Domain, err.Error()) - } - - err = ioutil.WriteFile(metaOut, jsonBytes, 0600) - if err != nil { - logger().Fatalf("Unable to save CertResource for domain %s\n\t%s", certRes.Domain, err.Error()) - } -} - -func handleTOS(c *cli.Context, client *acme.Client, acc *Account) { - // Check for a global accept override - if c.GlobalBool("accept-tos") { - err := client.AgreeToTOS() - if err != nil { - logger().Fatalf("Could not agree to TOS: %s", err.Error()) - } - - acc.Save() - return - } - - reader := bufio.NewReader(os.Stdin) - logger().Printf("Please review the TOS at %s", acc.Registration.TosURL) - - for { - logger().Println("Do you accept the TOS? Y/n") - text, err := reader.ReadString('\n') - if err != nil { - logger().Fatalf("Could not read from console: %s", err.Error()) - } - - text = strings.Trim(text, "\r\n") - - if text == "n" { - logger().Fatal("You did not accept the TOS. Unable to proceed.") - } - - if text == "Y" || text == "y" || text == "" { - err = client.AgreeToTOS() - if err != nil { - logger().Fatalf("Could not agree to TOS: %s", err.Error()) - } - acc.Save() - break - } - - logger().Println("Your input was invalid. Please answer with one of Y/y, n or by pressing enter.") - } -} - -func readCSRFile(filename string) (*x509.CertificateRequest, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - raw := bytes - - // see if we can find a PEM-encoded CSR - var p *pem.Block - rest := bytes - for { - // decode a PEM block - p, rest = pem.Decode(rest) - - // did we fail? - if p == nil { - break - } - - // did we get a CSR? 
- if p.Type == "CERTIFICATE REQUEST" { - raw = p.Bytes - } - } - - // no PEM-encoded CSR - // assume we were given a DER-encoded ASN.1 CSR - // (if this assumption is wrong, parsing these bytes will fail) - return x509.ParseCertificateRequest(raw) -} - -func run(c *cli.Context) error { - conf, acc, client := setup(c) - if acc.Registration == nil { - reg, err := client.Register() - if err != nil { - logger().Fatalf("Could not complete registration\n\t%s", err.Error()) - } - - acc.Registration = reg - acc.Save() - - logger().Print("!!!! HEADS UP !!!!") - logger().Printf(` - Your account credentials have been saved in your Let's Encrypt - configuration directory at "%s". - You should make a secure backup of this folder now. This - configuration directory will also contain certificates and - private keys obtained from Let's Encrypt so making regular - backups of this folder is ideal.`, conf.AccountPath(c.GlobalString("email"))) - - } - - // If the agreement URL is empty, the account still needs to accept the LE TOS. - if acc.Registration.Body.Agreement == "" { - handleTOS(c, client, acc) - } - - // we require either domains or csr, but not both - hasDomains := len(c.GlobalStringSlice("domains")) > 0 - hasCsr := len(c.GlobalString("csr")) > 0 - if hasDomains && hasCsr { - logger().Fatal("Please specify either --domains/-d or --csr/-c, but not both") - } - if !hasDomains && !hasCsr { - logger().Fatal("Please specify --domains/-d (or --csr/-c if you already have a CSR)") - } - - var cert acme.CertificateResource - var failures map[string]error - - if hasDomains { - // obtain a certificate, generating a new private key - cert, failures = client.ObtainCertificate(c.GlobalStringSlice("domains"), !c.Bool("no-bundle"), nil, c.Bool("must-staple")) - } else { - // read the CSR - csr, err := readCSRFile(c.GlobalString("csr")) - if err != nil { - // we couldn't read the CSR - failures = map[string]error{"csr": err} - } else { - // obtain a certificate for this CSR - cert, failures = client.ObtainCertificateForCSR(*csr, !c.Bool("no-bundle")) - } - } - - if len(failures) > 0 { - for k, v := range failures { - logger().Printf("[%s] Could not obtain certificates\n\t%s", k, v.Error()) - } - - // Make sure to return a non-zero exit code if ObtainSANCertificate - // returned at least one error. Due to us not returning partial - // certificate we can just exit here instead of at the end. - os.Exit(1) - } - - err := checkFolder(conf.CertPath()) - if err != nil { - logger().Fatalf("Could not check/create path: %s", err.Error()) - } - - saveCertRes(cert, conf) - - return nil -} - -func revoke(c *cli.Context) error { - conf, acc, client := setup(c) - if acc.Registration == nil { - logger().Fatalf("Account %s is not registered. 
Use 'run' to register a new account.\n", acc.Email) - } - - err := checkFolder(conf.CertPath()) - if err != nil { - logger().Fatalf("Could not check/create path: %s", err.Error()) - } - - for _, domain := range c.GlobalStringSlice("domains") { - logger().Printf("Trying to revoke certificate for domain %s", domain) - - certPath := path.Join(conf.CertPath(), domain+".crt") - certBytes, err := ioutil.ReadFile(certPath) - - err = client.RevokeCertificate(certBytes) - if err != nil { - logger().Fatalf("Error while revoking the certificate for domain %s\n\t%s", domain, err.Error()) - } else { - logger().Print("Certificate was revoked.") - } - } - - return nil -} - -func renew(c *cli.Context) error { - conf, acc, client := setup(c) - if acc.Registration == nil { - logger().Fatalf("Account %s is not registered. Use 'run' to register a new account.\n", acc.Email) - } - - if len(c.GlobalStringSlice("domains")) <= 0 { - logger().Fatal("Please specify at least one domain.") - } - - domain := c.GlobalStringSlice("domains")[0] - - // load the cert resource from files. - // We store the certificate, private key and metadata in different files - // as web servers would not be able to work with a combined file. - certPath := path.Join(conf.CertPath(), domain+".crt") - privPath := path.Join(conf.CertPath(), domain+".key") - metaPath := path.Join(conf.CertPath(), domain+".json") - - certBytes, err := ioutil.ReadFile(certPath) - if err != nil { - logger().Fatalf("Error while loading the certificate for domain %s\n\t%s", domain, err.Error()) - } - - if c.IsSet("days") { - expTime, err := acme.GetPEMCertExpiration(certBytes) - if err != nil { - logger().Printf("Could not get Certification expiration for domain %s", domain) - } - - if int(expTime.Sub(time.Now()).Hours()/24.0) > c.Int("days") { - return nil - } - } - - metaBytes, err := ioutil.ReadFile(metaPath) - if err != nil { - logger().Fatalf("Error while loading the meta data for domain %s\n\t%s", domain, err.Error()) - } - - var certRes acme.CertificateResource - err = json.Unmarshal(metaBytes, &certRes) - if err != nil { - logger().Fatalf("Error while marshalling the meta data for domain %s\n\t%s", domain, err.Error()) - } - - if c.Bool("reuse-key") { - keyBytes, err := ioutil.ReadFile(privPath) - if err != nil { - logger().Fatalf("Error while loading the private key for domain %s\n\t%s", domain, err.Error()) - } - certRes.PrivateKey = keyBytes - } - - certRes.Certificate = certBytes - - newCert, err := client.RenewCertificate(certRes, !c.Bool("no-bundle"), c.Bool("must-staple")) - if err != nil { - logger().Fatalf("%s", err.Error()) - } - - saveCertRes(newCert, conf) - - return nil -} diff --git a/vendor/github.com/xenolf/lego/configuration.go b/vendor/github.com/xenolf/lego/configuration.go deleted file mode 100644 index f92c1fe96..000000000 --- a/vendor/github.com/xenolf/lego/configuration.go +++ /dev/null @@ -1,76 +0,0 @@ -package main - -import ( - "fmt" - "net/url" - "os" - "path" - "strings" - - "github.com/urfave/cli" - "github.com/xenolf/lego/acme" -) - -// Configuration type from CLI and config files. -type Configuration struct { - context *cli.Context -} - -// NewConfiguration creates a new configuration from CLI data. 
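The Configuration helpers that follow turn CLI flags into an on-disk layout. A hedged sketch of where things end up (editor's illustration, not part of the deleted file), assuming --path=/tmp/lego and a server URL whose host is acme-v01.api.letsencrypt.org:

    // CertPath()                -> /tmp/lego/certificates
    // AccountsPath()            -> /tmp/lego/accounts/acme-v01.api.letsencrypt.org
    // AccountPath("me@x.io")    -> /tmp/lego/accounts/acme-v01.api.letsencrypt.org/me@x.io
    // AccountKeysPath("me@x.io") appends a trailing /keys
    // saveCertRes (above) then writes <domain>.crt/.key/.json under CertPath().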
-func NewConfiguration(c *cli.Context) *Configuration { - return &Configuration{context: c} -} - -// KeyType the type from which private keys should be generated -func (c *Configuration) KeyType() (acme.KeyType, error) { - switch strings.ToUpper(c.context.GlobalString("key-type")) { - case "RSA2048": - return acme.RSA2048, nil - case "RSA4096": - return acme.RSA4096, nil - case "RSA8192": - return acme.RSA8192, nil - case "EC256": - return acme.EC256, nil - case "EC384": - return acme.EC384, nil - } - - return "", fmt.Errorf("Unsupported KeyType: %s", c.context.GlobalString("key-type")) -} - -// ExcludedSolvers is a list of solvers that are to be excluded. -func (c *Configuration) ExcludedSolvers() (cc []acme.Challenge) { - for _, s := range c.context.GlobalStringSlice("exclude") { - cc = append(cc, acme.Challenge(s)) - } - return -} - -// ServerPath returns the OS dependent path to the data for a specific CA -func (c *Configuration) ServerPath() string { - srv, _ := url.Parse(c.context.GlobalString("server")) - srvStr := strings.Replace(srv.Host, ":", "_", -1) - return strings.Replace(srvStr, "/", string(os.PathSeparator), -1) -} - -// CertPath gets the path for certificates. -func (c *Configuration) CertPath() string { - return path.Join(c.context.GlobalString("path"), "certificates") -} - -// AccountsPath returns the OS dependent path to the -// local accounts for a specific CA -func (c *Configuration) AccountsPath() string { - return path.Join(c.context.GlobalString("path"), "accounts", c.ServerPath()) -} - -// AccountPath returns the OS dependent path to a particular account -func (c *Configuration) AccountPath(acc string) string { - return path.Join(c.AccountsPath(), acc) -} - -// AccountKeysPath returns the OS dependent path to the keys of a particular account -func (c *Configuration) AccountKeysPath(acc string) string { - return path.Join(c.AccountPath(acc), "keys") -} diff --git a/vendor/github.com/xenolf/lego/crypto.go b/vendor/github.com/xenolf/lego/crypto.go deleted file mode 100644 index 8b23e2fc1..000000000 --- a/vendor/github.com/xenolf/lego/crypto.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/pem" - "errors" - "io/ioutil" - "os" -) - -func generatePrivateKey(file string) (crypto.PrivateKey, error) { - - privateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - if err != nil { - return nil, err - } - - keyBytes, err := x509.MarshalECPrivateKey(privateKey) - if err != nil { - return nil, err - } - - pemKey := pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} - - certOut, err := os.Create(file) - if err != nil { - return nil, err - } - - pem.Encode(certOut, &pemKey) - certOut.Close() - - return privateKey, nil -} - -func loadPrivateKey(file string) (crypto.PrivateKey, error) { - keyBytes, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - - keyBlock, _ := pem.Decode(keyBytes) - - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - } - - return nil, errors.New("Unknown private key type.") -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d078..000000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index cfc8ead1b..000000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. 
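On the wire, an OpenSSH certificate travels as an ordinary public key, so callers reach the certificate fields defined below by parsing and type-asserting. A minimal sketch (editor's illustration, not part of the deleted file; wireBytes is an assumed raw key blob, with the usual ssh, fmt and log imports):

    pub, err := ssh.ParsePublicKey(wireBytes)
    if err != nil {
        log.Fatal(err)
    }
    // parseCert (below) is what actually runs behind this assertion.
    if cert, ok := pub.(*ssh.Certificate); ok {
        fmt.Println(cert.KeyId, cert.ValidPrincipals)
    }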
-type Signature struct { - Format string - Blob []byte -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. -type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) - } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. 
- if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - return &openSSHCertSigner{cert, signer}, nil -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. 
This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. -func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. 
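Before CheckCert's implementation, a usage sketch of the checker as a whole, plugged into a server config as the doc comment above suggests (editor's illustration, not part of the deleted file; assumes caPub ssh.PublicKey holds the one trusted CA key and bytes is imported):

    checker := &ssh.CertChecker{
        // Trust exactly one CA for user certificates; a real deployment
        // would also set IsRevoked against a revocation list.
        IsUserAuthority: func(auth ssh.PublicKey) bool {
            return bytes.Equal(auth.Marshal(), caPub.Marshal())
        },
    }
    serverConf := &ssh.ServerConfig{
        // Authenticate (above) validates type, principals, timestamps
        // and signature, delegating to CheckCert below.
        PublicKeyCallback: checker.Authenticate,
    }
    _ = serverConf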
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert sets c.SignatureKey to the authority's public key and stores a -// Signature, by authority, in the certificate. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoED25519: CertAlgoED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. 
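The signing path pairs naturally with Marshal, whose implementation follows. A sketch of minting and serializing a user certificate (editor's illustration, not part of the deleted file; userPub ssh.PublicKey and caSigner ssh.Signer are assumed, with crypto/rand, time and log imported):

    cert := &ssh.Certificate{
        Key:             userPub,
        Serial:          1,
        CertType:        ssh.UserCert,
        KeyId:           "alice",
        ValidPrincipals: []string{"alice"},
        ValidAfter:      uint64(time.Now().Unix()),
        ValidBefore:     uint64(time.Now().Add(24 * time.Hour).Unix()),
    }
    // SignCert (above) fills Nonce, SignatureKey and Signature.
    if err := cert.SignCert(rand.Reader, caSigner); err != nil {
        log.Fatal(err)
    }
    wire := cert.Marshal() // OpenSSH wire format
    _ = wire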
-func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. -func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00d..000000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. 
- ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). - maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). 
- decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. -func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. 
- ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. 
-func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, 
error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index e67c5e0aa..000000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,629 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. 
-type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type streamCipherMode struct { - keySize int - ivSize int - skip int - createFunc func(key, iv []byte) (cipher.Stream, error) -} - -func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) { - if len(key) < c.keySize { - panic("ssh: key length too small for cipher") - } - if len(iv) < c.ivSize { - panic("ssh: iv too small for cipher") - } - - stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize]) - if err != nil { - return nil, err - } - - var streamDump []byte - if c.skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := c.skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - return stream, nil -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*streamCipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR}, - "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR}, - "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, 1536, newRC4}, - "arcfour256": {32, 0, 1536, newRC4}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, 0, newRC4}, - - // AES-GCM is not a stream cipher, so it is constructed with a - // special case. If we add any more non-stream ciphers, we - // should invest a cleaner way to do this. - gcmCipherID: {16, 12, 0, nil}, - - // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, 0, nil}, - - // 3des-cbc is insecure and is disabled by default. - tripledescbcID: {24, des.BlockSize, 0, nil}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readPacket reads and decrypt a single packet from the reader argument. 
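The cipherModes table above deliberately leaves CBC and arcfour out of the defaults; callers opt in, or restrict further, through the public Config.Ciphers knob. A hedged sketch of that selection surface (editor's illustration, not part of the deleted file; readPacket's implementation follows):

    cfg := &ssh.ClientConfig{
        User:            "demo",
        Auth:            []ssh.AuthMethod{ssh.Password("secret")},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(), // example only
        Config: ssh.Config{
            // Names must match keys of cipherModes above.
            Ciphers: []string{"aes256-ctr", "aes128-ctr"},
        },
    }
    _ = cfg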
-func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. - if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writePacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil 
{ - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(iv, key []byte) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. - padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. 
- oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, iv, key, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, iv, key, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. -type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. - if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. 
- if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index 6fd199455..000000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. 
If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip")) - go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-streamlocal@openssh.com")) - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. -func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. 
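For orientation, here is a minimal, hypothetical client built from the public API being removed above (Dial, ClientConfig, NewSession); the host, user, password, and command are placeholders, not anything taken from this repository:

package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "demo",                                   // placeholder user
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // placeholder secret
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // acceptable in a sketch, never in production
		Timeout:         5 * time.Second,
	}

	// Dial wraps net.DialTimeout, NewClientConn, and NewClient, as shown above.
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer client.Close()

	// A session is one remote execution of a program.
	session, err := client.NewSession()
	if err != nil {
		log.Fatalf("session: %v", err)
	}
	defer session.Close()

	out, err := session.Output("uname -a")
	if err != nil {
		log.Fatalf("run: %v", err)
	}
	fmt.Printf("%s", out)
}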
- r.Reply(false, nil)
- }
-}
-
-// handleChannelOpens handles channel open messages from the remote side.
-func (c *Client) handleChannelOpens(in <-chan NewChannel) {
- for ch := range in {
- c.mu.Lock()
- handler := c.channelHandlers[ch.ChannelType()]
- c.mu.Unlock()
-
- if handler != nil {
- handler <- ch
- } else {
- ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
- }
- }
-
- c.mu.Lock()
- for _, ch := range c.channelHandlers {
- close(ch)
- }
- c.channelHandlers = nil
- c.mu.Unlock()
-}
-
-// Dial starts a client connection to the given SSH server. It is a
-// convenience function that connects to the given network address,
-// initiates the SSH handshake, and then sets up a Client. For access
-// to incoming channels and requests, use net.Dial with NewClientConn
-// instead.
-func Dial(network, addr string, config *ClientConfig) (*Client, error) {
- conn, err := net.DialTimeout(network, addr, config.Timeout)
- if err != nil {
- return nil, err
- }
- c, chans, reqs, err := NewClientConn(conn, addr, config)
- if err != nil {
- return nil, err
- }
- return NewClient(c, chans, reqs), nil
-}
-
-// HostKeyCallback is the function type used for verifying server
-// keys. A HostKeyCallback must return nil if the host key is OK, or
-// an error to reject it. It receives the hostname as passed to Dial
-// or NewClientConn. The remote address is the RemoteAddr of the
-// net.Conn underlying the SSH connection.
-type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
-
-// BannerCallback is the function type used to handle the banner sent by
-// the server. A BannerCallback receives the message sent by the remote server.
-type BannerCallback func(message string) error
-
-// A ClientConfig structure is used to configure a Client. It must not be
-// modified after having been passed to an SSH function.
-type ClientConfig struct {
- // Config contains configuration that is shared between clients and
- // servers.
- Config
-
- // User contains the username to authenticate as.
- User string
-
- // Auth contains possible authentication methods to use with the
- // server. Only the first instance of a particular RFC 4252 method will
- // be used during authentication.
- Auth []AuthMethod
-
- // HostKeyCallback is called during the cryptographic
- // handshake to validate the server's host key. The client
- // configuration must supply this callback for the connection
- // to succeed. The functions InsecureIgnoreHostKey or
- // FixedHostKey can be used for simplistic host key checks.
- HostKeyCallback HostKeyCallback
-
- // BannerCallback is called during the SSH dance to display a custom
- // message from the server. The client configuration can supply this callback to
- // handle it as desired. The function BannerDisplayStderr can be used for
- // simplistic display on Stderr.
- BannerCallback BannerCallback
-
- // ClientVersion contains the version identification string that will
- // be used for the connection. If empty, a reasonable default is used.
- ClientVersion string
-
- // HostKeyAlgorithms lists the key types that the client will
- // accept from the server as host key, in order of
- // preference. If empty, a reasonable default is used. Any
- // string returned from PublicKey.Type method may be used, or
- // any of the CertAlgoXxxx and KeyAlgoXxxx constants.
- HostKeyAlgorithms []string
-
- // Timeout is the maximum amount of time for the TCP connection to establish.
- //
- // A Timeout of zero means no timeout.
- Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. -func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. -func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index a1252cb9b..000000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,510 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - tried := make(map[string]bool) - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok { - // success - return nil - } - tried[auth.method()] = true - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if tried[candidateMethod] { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) -} - -func keys(m map[string]bool) []string { - s := make([]string, 0, len(m)) - - for key := range m { - s = append(s, key) - } - return s -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. 
If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. -type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return false, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. -type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return false, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return false, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. 
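As a hypothetical sketch of the two client-side constructors above: a private key is parsed into a Signer for PublicKeys, with Password as a fallback. It assumes imports "io/ioutil" and "golang.org/x/crypto/ssh"; the key path and secret are placeholders.

func authMethods() ([]ssh.AuthMethod, error) {
	pem, err := ioutil.ReadFile("/home/demo/.ssh/id_rsa") // placeholder path
	if err != nil {
		return nil, err
	}
	signer, err := ssh.ParsePrivateKey(pem) // defined in keys.go of this package
	if err != nil {
		return nil, err
	}
	return []ssh.AuthMethod{
		ssh.PublicKeys(signer),   // tried first; uses the enquiry flow described above
		ssh.Password("fallback"), // placeholder secret
	}, nil
}

The order of the returned slice matters: ClientConfig.Auth is tried in order, and only the first instance of a given RFC 4252 method is used.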
- - signers, err := cb() - if err != nil { - return false, nil, err - } - var methods []string - for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) - if err != nil { - return false, nil, err - } - if !ok { - continue - } - - pub := signer.PublicKey() - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return false, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return false, nil, err - } - var success bool - success, methods, err = handleAuthResponse(c) - if err != nil { - return false, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return false, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. 
-func handleAuthResponse(c packetConn) (bool, []string, error) {
- for {
- packet, err := c.readPacket()
- if err != nil {
- return false, nil, err
- }
-
- switch packet[0] {
- case msgUserAuthBanner:
- if err := handleBannerResponse(c, packet); err != nil {
- return false, nil, err
- }
- case msgUserAuthFailure:
- var msg userAuthFailureMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
- }
- return false, msg.Methods, nil
- case msgUserAuthSuccess:
- return true, nil, nil
- default:
- return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
- }
- }
-}
-
-func handleBannerResponse(c packetConn, packet []byte) error {
- var msg userAuthBannerMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return err
- }
-
- transport, ok := c.(*handshakeTransport)
- if !ok {
- return nil
- }
-
- if transport.bannerCallback != nil {
- return transport.bannerCallback(msg.Message)
- }
-
- return nil
-}
-
-// KeyboardInteractiveChallenge should print questions, optionally
-// disabling echoing (e.g. for passwords), and return all the answers.
-// Challenge may be called multiple times in a single session. After
-// successful authentication, the server may send a challenge with no
-// questions, for which the user and instruction messages should be
-// printed. RFC 4256 section 3.3 details how the UI should behave for
-// both CLI and GUI environments.
-type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
-
-// KeyboardInteractive returns an AuthMethod using a prompt/response
-// sequence controlled by the server.
-func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
- return challenge
-}
-
-func (cb KeyboardInteractiveChallenge) method() string {
- return "keyboard-interactive"
-}
-
-func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
- type initiateMsg struct {
- User string `sshtype:"50"`
- Service string
- Method string
- Language string
- Submethods string
- }
-
- if err := c.writePacket(Marshal(&initiateMsg{
- User: user,
- Service: serviceSSH,
- Method: "keyboard-interactive",
- })); err != nil {
- return false, nil, err
- }
-
- for {
- packet, err := c.readPacket()
- if err != nil {
- return false, nil, err
- }
-
- // like handleAuthResponse, but with fewer options.
- switch packet[0] {
- case msgUserAuthBanner:
- if err := handleBannerResponse(c, packet); err != nil {
- return false, nil, err
- }
- continue
- case msgUserAuthInfoRequest:
- // OK
- case msgUserAuthFailure:
- var msg userAuthFailureMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
- }
- return false, msg.Methods, nil
- case msgUserAuthSuccess:
- return true, nil, nil
- default:
- return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
- }
-
- var msg userAuthInfoRequestMsg
- if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
- }
-
- // Manually unpack the prompt/echo pairs.
- rest := msg.Prompts
- var prompts []string
- var echos []bool
- for i := 0; i < int(msg.NumPrompts); i++ {
- prompt, r, ok := parseString(rest)
- if !ok || len(r) == 0 {
- return false, nil, errors.New("ssh: prompt format error")
- }
- prompts = append(prompts, string(prompt))
- echos = append(echos, r[0] != 0)
- rest = r[1:]
- }
-
- if len(rest) != 0 {
- return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
- }
-
- answers, err := cb(msg.User, msg.Instruction, prompts, echos)
- if err != nil {
- return false, nil, err
- }
-
- if len(answers) != len(prompts) {
- return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
- }
- responseLength := 1 + 4
- for _, a := range answers {
- responseLength += stringLength(len(a))
- }
- serialized := make([]byte, responseLength)
- p := serialized
- p[0] = msgUserAuthInfoResponse
- p = p[1:]
- p = marshalUint32(p, uint32(len(answers)))
- for _, a := range answers {
- p = marshalString(p, []byte(a))
- }
-
- if err := c.writePacket(serialized); err != nil {
- return false, nil, err
- }
- }
-}
-
-type retryableAuthMethod struct {
- authMethod AuthMethod
- maxTries int
-}
-
-func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
- for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
- ok, methods, err = r.authMethod.auth(session, user, c, rand)
- if ok || err != nil { // either success or error terminate
- return ok, methods, err
- }
- }
- return ok, methods, err
-}
-
-func (r *retryableAuthMethod) method() string {
- return r.authMethod.method()
-}
-
-// RetryableAuthMethod is a decorator for other auth methods enabling them to
-// be retried up to maxTries before considering that AuthMethod itself failed.
-// If maxTries is <= 0, it will retry indefinitely.
-//
-// This is useful for interactive clients using challenge/response type
-// authentication (e.g. Keyboard-Interactive, Password, etc) where the user
-// could mistype their response resulting in the server issuing a
-// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4
-// [keyboard-interactive]). Without this decorator, the non-retryable
-// AuthMethod would be removed from future consideration, and never tried again
-// (and so the user would never be able to retry their entry).
-func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
- return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
-}
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
deleted file mode 100644
index 135b4edd7..000000000
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "crypto"
- "crypto/rand"
- "fmt"
- "io"
- "math"
- "sync"
-
- _ "crypto/sha1"
- _ "crypto/sha256"
- _ "crypto/sha512"
-)
-
-// These are string constants in the SSH protocol.
-const (
- compressionNone = "none"
- serviceUserAuth = "ssh-userauth"
- serviceSSH = "ssh-connection"
-)
-
-// supportedCiphers specifies the supported ciphers in preference order.
-var supportedCiphers = []string{
- "aes128-ctr", "aes192-ctr", "aes256-ctr",
- "aes128-gcm@openssh.com",
- "arcfour256", "arcfour128",
-}
-
-// supportedKexAlgos specifies the supported key-exchange algorithms in
-// preference order.
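Putting the two pieces just removed together, a hypothetical keyboard-interactive method that answers prompts from a fixed map, retried up to three times via RetryableAuthMethod; it assumes import "golang.org/x/crypto/ssh", and the answers map is a placeholder:

func retriedInteractive() ssh.AuthMethod {
	answers := map[string]string{"Password: ": "secret"} // placeholder prompt/answer
	challenge := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		replies := make([]string, len(questions))
		for i, q := range questions {
			replies[i] = answers[q] // empty reply if the server asks something unexpected
		}
		return replies, nil
	}
	return ssh.RetryableAuthMethod(ssh.KeyboardInteractive(challenge), 3)
}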
-var supportedKexAlgos = []string{
- kexAlgoCurve25519SHA256,
- // P384 and P521 are not constant-time yet, but since we don't
- // reuse ephemeral keys, using them for ECDH should be OK.
- kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
- kexAlgoDH14SHA1, kexAlgoDH1SHA1,
-}
-
-// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
-// of authenticating servers) in preference order.
-var supportedHostKeyAlgos = []string{
- CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
- CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
-
- KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
- KeyAlgoRSA, KeyAlgoDSA,
-
- KeyAlgoED25519,
-}
-
-// supportedMACs specifies a default set of MAC algorithms in preference order.
-// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
-// because they have reached the end of their useful life.
-var supportedMACs = []string{
- "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
-}
-
-var supportedCompressions = []string{compressionNone}
-
-// hashFuncs keeps the mapping of supported algorithms to their respective
-// hashes needed for signature verification.
-var hashFuncs = map[string]crypto.Hash{
- KeyAlgoRSA: crypto.SHA1,
- KeyAlgoDSA: crypto.SHA1,
- KeyAlgoECDSA256: crypto.SHA256,
- KeyAlgoECDSA384: crypto.SHA384,
- KeyAlgoECDSA521: crypto.SHA512,
- CertAlgoRSAv01: crypto.SHA1,
- CertAlgoDSAv01: crypto.SHA1,
- CertAlgoECDSA256v01: crypto.SHA256,
- CertAlgoECDSA384v01: crypto.SHA384,
- CertAlgoECDSA521v01: crypto.SHA512,
-}
-
-// unexpectedMessageError results when the SSH message that we received didn't
-// match what we wanted.
-func unexpectedMessageError(expected, got uint8) error {
- return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
-}
-
-// parseError results from a malformed SSH message.
-func parseError(tag uint8) error {
- return fmt.Errorf("ssh: parse error in message type %d", tag)
-}
-
-func findCommon(what string, client []string, server []string) (common string, err error) {
- for _, c := range client {
- for _, s := range server {
- if c == s {
- return c, nil
- }
- }
- }
- return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
-}
-
-type directionAlgorithms struct {
- Cipher string
- MAC string
- Compression string
-}
-
-// rekeyBytes returns the rekeying interval in bytes.
-func (a *directionAlgorithms) rekeyBytes() int64 {
- // According to RFC4344 block ciphers should rekey after
- // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
- // 128.
- switch a.Cipher {
- case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
- return 16 * (1 << 32)
-
- }
-
- // For others, stick with RFC4253 recommendation to rekey after 1 GB of data.
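The negotiation rule implemented by findCommon above is easy to state: the first algorithm in the client's preference list that the server also offers wins. A standalone illustration (the algorithm names are taken from the supported lists above):

package main

import "fmt"

// firstCommon mirrors findCommon's selection rule without the error plumbing.
func firstCommon(client, server []string) (string, bool) {
	for _, c := range client {
		for _, s := range server {
			if c == s {
				return c, true
			}
		}
	}
	return "", false
}

func main() {
	client := []string{"curve25519-sha256@libssh.org", "ecdh-sha2-nistp256"}
	server := []string{"ecdh-sha2-nistp256", "curve25519-sha256@libssh.org"}
	kex, _ := firstCommon(client, server)
	fmt.Println(kex) // curve25519-sha256@libssh.org: the client's order decides
}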
- return 1 << 30
-}
-
-type algorithms struct {
- kex string
- hostKey string
- w directionAlgorithms
- r directionAlgorithms
-}
-
-func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
- result := &algorithms{}
-
- result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
- if err != nil {
- return
- }
-
- result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
- if err != nil {
- return
- }
-
- result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
- if err != nil {
- return
- }
-
- result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
- if err != nil {
- return
- }
-
- result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
- if err != nil {
- return
- }
-
- result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
- if err != nil {
- return
- }
-
- result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
- if err != nil {
- return
- }
-
- result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
- if err != nil {
- return
- }
-
- return result, nil
-}
-
-// If RekeyThreshold is too small, we can't make any progress sending
-// data.
-const minRekeyThreshold uint64 = 256
-
-// Config contains configuration data common to both ServerConfig and
-// ClientConfig.
-type Config struct {
- // Rand provides the source of entropy for cryptographic
- // primitives. If Rand is nil, the cryptographic random reader
- // in package crypto/rand will be used.
- Rand io.Reader
-
- // The maximum number of bytes sent or received after which a
- // new key is negotiated. It must be at least 256. If
- // unspecified, a size suitable for the chosen cipher is used.
- RekeyThreshold uint64
-
- // The allowed key exchange algorithms. If unspecified then a
- // default set of algorithms is used.
- KeyExchanges []string
-
- // The allowed cipher algorithms. If unspecified then a sensible
- // default is used.
- Ciphers []string
-
- // The allowed MAC algorithms. If unspecified then a sensible default
- // is used.
- MACs []string
-}
-
-// SetDefaults sets sensible values for unset fields in config. This is
-// exported for testing: Configs passed to SSH functions are copied and have
-// default values set automatically.
-func (c *Config) SetDefaults() {
- if c.Rand == nil {
- c.Rand = rand.Reader
- }
- if c.Ciphers == nil {
- c.Ciphers = supportedCiphers
- }
- var ciphers []string
- for _, c := range c.Ciphers {
- if cipherModes[c] != nil {
- // reject the cipher if we have no cipherModes definition
- ciphers = append(ciphers, c)
- }
- }
- c.Ciphers = ciphers
-
- if c.KeyExchanges == nil {
- c.KeyExchanges = supportedKexAlgos
- }
-
- if c.MACs == nil {
- c.MACs = supportedMACs
- }
-
- if c.RekeyThreshold == 0 {
- // cipher-specific default
- } else if c.RekeyThreshold < minRekeyThreshold {
- c.RekeyThreshold = minRekeyThreshold
- } else if c.RekeyThreshold >= math.MaxInt64 {
- // Avoid weirdness if somebody uses -1 as a threshold.
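A hypothetical sketch of narrowing the negotiated algorithm sets through the embedded Config; the values are drawn from the supported lists above, SetDefaults fills in anything left nil, and the import of "golang.org/x/crypto/ssh" is assumed:

config := &ssh.ClientConfig{
	User: "demo", // placeholder
	Config: ssh.Config{
		KeyExchanges:   []string{"curve25519-sha256@libssh.org"},
		Ciphers:        []string{"aes128-ctr", "aes256-ctr"},
		MACs:           []string{"hmac-sha2-256-etm@openssh.com"},
		RekeyThreshold: 1 << 30, // rekey after 1 GiB rather than the cipher default
	},
	HostKeyCallback: ssh.InsecureIgnoreHostKey(), // sketch only
}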
- c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. -func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index fd6b0681b..000000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
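The append helpers just removed implement the RFC 4251 wire format: a big-endian uint32 length prefix followed by the raw bytes. A standalone illustration of what appendString produces:

package main

import "fmt"

func appendU32(buf []byte, n uint32) []byte {
	return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}

func appendString(buf []byte, s string) []byte {
	buf = appendU32(buf, uint32(len(s)))
	return append(buf, s...)
}

func main() {
	wire := appendString(nil, "ssh-userauth")
	// 00 00 00 0c 73 73 68 2d ...: a 4-byte length (12), then the bytes.
	fmt.Printf("% x\n", wire)
}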
-
-package ssh
-
-import (
- "fmt"
- "net"
-)
-
-// OpenChannelError is returned if the other side rejects an
-// OpenChannel request.
-type OpenChannelError struct {
- Reason RejectionReason
- Message string
-}
-
-func (e *OpenChannelError) Error() string {
- return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
-}
-
-// ConnMetadata holds metadata for the connection.
-type ConnMetadata interface {
- // User returns the user ID for this connection.
- User() string
-
- // SessionID returns the session hash, also denoted by H.
- SessionID() []byte
-
- // ClientVersion returns the client's version string as hashed
- // into the session ID.
- ClientVersion() []byte
-
- // ServerVersion returns the server's version string as hashed
- // into the session ID.
- ServerVersion() []byte
-
- // RemoteAddr returns the remote address for this connection.
- RemoteAddr() net.Addr
-
- // LocalAddr returns the local address for this connection.
- LocalAddr() net.Addr
-}
-
-// Conn represents an SSH connection for both server and client roles.
-// Conn is the basis for implementing an application layer, such
-// as Client, which implements the traditional shell access for
-// clients.
-type Conn interface {
- ConnMetadata
-
- // SendRequest sends a global request, and returns the
- // reply. If wantReply is true, it returns the response status
- // and payload. See also RFC4254, section 4.
- SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
-
- // OpenChannel tries to open a channel. If the request is
- // rejected, it returns *OpenChannelError. On success it returns
- // the SSH Channel and a Go channel for incoming, out-of-band
- // requests. The Go channel must be serviced, or the
- // connection will hang.
- OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
-
- // Close closes the underlying network connection.
- Close() error
-
- // Wait blocks until the connection has shut down, and returns the
- // error causing the shutdown.
- Wait() error
-
- // TODO(hanwen): consider exposing:
- // RequestKeyChange
- // Disconnect
-}
-
-// DiscardRequests consumes and rejects all requests from the
-// passed-in channel.
-func DiscardRequests(in <-chan *Request) {
- for req := range in {
- if req.WantReply {
- req.Reply(false, nil)
- }
- }
-}
-
-// A connection represents an incoming connection.
-type connection struct {
- transport *handshakeTransport
- sshConn
-
- // The connection protocol.
- *mux
-}
-
-func (c *connection) Close() error {
- return c.sshConn.conn.Close()
-}
-
-// sshConn provides net.Conn metadata, but disallows direct reads and
-// writes.
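A hypothetical sketch of the lower-level entry point described by the Conn interface above: drive the handshake yourself, then service the returned channels. It assumes imports "net" and "golang.org/x/crypto/ssh"; the address is a placeholder.

func lowLevelConn(config *ssh.ClientConfig) (ssh.Conn, <-chan ssh.NewChannel, error) {
	tcp, err := net.Dial("tcp", "example.com:22") // placeholder address
	if err != nil {
		return nil, nil, err
	}
	conn, chans, reqs, err := ssh.NewClientConn(tcp, "example.com:22", config)
	if err != nil {
		return nil, nil, err
	}
	// Both returned channels must be serviced or the connection hangs;
	// DiscardRequests (above) politely refuses every global request.
	go ssh.DiscardRequests(reqs)
	return conn, chans, nil
}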
-type sshConn struct {
- conn net.Conn
-
- user string
- sessionID []byte
- clientVersion []byte
- serverVersion []byte
-}
-
-func dup(src []byte) []byte {
- dst := make([]byte, len(src))
- copy(dst, src)
- return dst
-}
-
-func (c *sshConn) User() string {
- return c.user
-}
-
-func (c *sshConn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-func (c *sshConn) Close() error {
- return c.conn.Close()
-}
-
-func (c *sshConn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-func (c *sshConn) SessionID() []byte {
- return dup(c.sessionID)
-}
-
-func (c *sshConn) ClientVersion() []byte {
- return dup(c.clientVersion)
-}
-
-func (c *sshConn) ServerVersion() []byte {
- return dup(c.serverVersion)
-}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
deleted file mode 100644
index 67b7322c0..000000000
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package ssh implements an SSH client and server.
-
-SSH is a transport security protocol, an authentication protocol and a
-family of application protocols. The most typical application-level
-protocol is a remote shell and this is specifically implemented. However,
-the multiplexed nature of SSH is exposed to users that wish to support
-others.
-
-References:
- [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
- [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
-
-This package does not fall under the stability promise of the Go language itself,
-so its API may be changed when pressing needs arise.
-*/
-package ssh // import "golang.org/x/crypto/ssh"
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
deleted file mode 100644
index 4f7912ecd..000000000
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
- "crypto/rand"
- "errors"
- "fmt"
- "io"
- "log"
- "net"
- "sync"
-)
-
-// debugHandshake, if set, prints messages sent and received. Key
-// exchange messages are printed as if DH were used, so the debug
-// messages are wrong when using ECDH.
-const debugHandshake = false
-
-// chanSize sets the amount of buffering for SSH connections. This is
-// primarily for testing: setting chanSize=0 uncovers deadlocks more
-// quickly.
-const chanSize = 16
-
-// keyingTransport is a packet-based transport that supports key
-// changes. It need not be thread-safe. It should pass through
-// msgNewKeys in both directions.
-type keyingTransport interface {
- packetConn
-
- // prepareKeyChange sets up a key change. The key change for a
- // direction will be effected if a msgNewKeys message is sent
- // or received.
- prepareKeyChange(*algorithms, *kexResult) error
-}
-
-// handshakeTransport implements rekeying on top of a keyingTransport
-// and offers a thread-safe writePacket() interface.
-type handshakeTransport struct {
- conn keyingTransport
- config *Config
-
- serverVersion []byte
- clientVersion []byte
-
- // hostKeys is non-empty if we are the server.
In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
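Relating the hostKeyAlgorithms field above back to the public configuration: a hypothetical sketch that restricts which host-key types the client will accept, paired with a pinned key via FixedHostKey. It assumes import "golang.org/x/crypto/ssh"; pinned stands for a previously parsed ssh.PublicKey.

func strictHostKeyConfig(pinned ssh.PublicKey) *ssh.ClientConfig {
	return &ssh.ClientConfig{
		User:              "demo",                       // placeholder
		HostKeyAlgorithms: []string{ssh.KeyAlgoED25519}, // accept only plain ed25519 host keys
		HostKeyCallback:   ssh.FixedHostKey(pinned),     // reject anything but the pinned key
	}
}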
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. - } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. 
This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. 
- return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - if len(t.hostKeys) == 0 { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
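`sendKexInit` fills the `kexInitMsg` directly from `Config.KeyExchanges`, `Config.Ciphers`, and `Config.MACs`, so narrowing those lists is how callers steer `findAgreedAlgorithms`. A sketch under that assumption (the algorithm choices are illustrative, not a recommendation):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := ssh.Config{
		// Offered in preference order; negotiation picks the first
		// entry on each list that both sides support.
		KeyExchanges: []string{"curve25519-sha256@libssh.org", "ecdh-sha2-nistp256"},
		Ciphers:      []string{"aes128-gcm@openssh.com", "aes256-ctr"},
		MACs:         []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256"},
	}
	cfg.SetDefaults() // fills Rand and any list left empty
	fmt.Println(cfg.KeyExchanges, cfg.Ciphers, cfg.MACs)
}
```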
- if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) - } else { - result, err = t.client(kex, t.algorithms, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k - } - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index f34bcc013..000000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,540 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. 
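On the client side, the verified host key ends up in `hostKeyCallback`, which surfaces publicly as `ClientConfig.HostKeyCallback`. A sketch wiring it to an OpenSSH known_hosts file via the companion `knownhosts` package (the path, user, password, and address are all hypothetical):

```go
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// Build a HostKeyCallback from an OpenSSH known_hosts file.
	cb, err := knownhosts.New("/home/demo/.ssh/known_hosts") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // hypothetical
		HostKeyCallback: cb,                                       // rejects unknown or changed keys
	}
	client, err := ssh.Dial("tcp", "example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```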
-type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
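The arithmetic inside `dhGroup.diffieHellman` is plain modular exponentiation guarded by a bounds check on the peer's value. A toy walkthrough with a single-digit prime (real groups are 1024+ bits, and real code draws the exponents from crypto/rand as the deleted functions do):

```go
package main

import (
	"errors"
	"fmt"
	"math/big"
)

var one = big.NewInt(1)

// dh mirrors dhGroup.diffieHellman: reject peer values outside the
// open interval (1, p-1), then compute theirPublic^myPrivate mod p.
func dh(theirPublic, myPrivate, p *big.Int) (*big.Int, error) {
	pMinus1 := new(big.Int).Sub(p, one)
	if theirPublic.Cmp(one) <= 0 || theirPublic.Cmp(pMinus1) >= 0 {
		return nil, errors.New("dh: parameter out of bounds")
	}
	return new(big.Int).Exp(theirPublic, myPrivate, p), nil
}

func main() {
	p := big.NewInt(23) // toy prime, illustration only
	g := big.NewInt(5)
	x := big.NewInt(6)  // client secret
	y := big.NewInt(15) // server secret

	X := new(big.Int).Exp(g, x, p) // client sends X
	Y := new(big.Int).Exp(g, y, p) // server sends Y

	k1, _ := dh(Y, x, p)
	k2, _ := dh(X, y, p)
	fmt.Println(k1, k2) // both sides derive the same shared secret (2)
}
```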
- sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, nil -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
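`validateECPublicKey` is the standard [SEC1] 3.2.2 check minus the cofactor step. The same checks as a standalone helper, exercised against one valid and one invalid point:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"math/big"
)

// onCurve repeats validateECPublicKey: reject the point at infinity,
// coordinates outside the field, and points off the curve equation.
func onCurve(curve elliptic.Curve, x, y *big.Int) bool {
	if x.Sign() == 0 && y.Sign() == 0 {
		return false
	}
	if x.Cmp(curve.Params().P) >= 0 || y.Cmp(curve.Params().P) >= 0 {
		return false
	}
	return curve.IsOnCurve(x, y)
}

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	fmt.Println(onCurve(elliptic.P256(), key.X, key.Y))                 // true
	fmt.Println(onCurve(elliptic.P256(), big.NewInt(1), big.NewInt(2))) // false
}
```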
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index dadf41ab7..000000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1031 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
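The `curve25519KeyPair` flow above reduces to one `ScalarBaseMult` per side plus one `ScalarMult` against the peer's public value. A self-contained sketch of the agreement using the same x/crypto/curve25519 package:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/curve25519"
)

type keyPair struct{ priv, pub [32]byte }

// generate mirrors curve25519KeyPair.generate: 32 random bytes as the
// private scalar, with the public point derived via ScalarBaseMult.
func generate(r io.Reader) (kp keyPair, err error) {
	if _, err = io.ReadFull(r, kp.priv[:]); err != nil {
		return
	}
	curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
	return
}

func main() {
	a, _ := generate(rand.Reader)
	b, _ := generate(rand.Reader)

	var s1, s2 [32]byte
	curve25519.ScalarMult(&s1, &a.priv, &b.pub)
	curve25519.ScalarMult(&s2, &b.priv, &a.pub)
	fmt.Println(s1 == s2) // true: both sides share the same secret
}
```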
- -package ssh - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. -func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. 
- // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. - - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKeys parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. - var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. -func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. 
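`ParseKnownHosts` round-trips cleanly with `MarshalAuthorizedKey`, since a known_hosts entry is just an optional marker, a host pattern, and an authorized_keys-style key blob. A sketch that builds a marker-prefixed entry and parses it back (the hostname pattern is made up):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ed25519"
	"golang.org/x/crypto/ssh"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	key, err := ssh.NewPublicKey(pub)
	if err != nil {
		log.Fatal(err)
	}

	// "@revoked <hosts> <type> <base64>" is the marker form handled
	// above; MarshalAuthorizedKey supplies "<type> <base64>\n".
	line := append([]byte("@revoked *.example.com "), ssh.MarshalAuthorizedKey(key)...)

	marker, hosts, parsed, _, _, err := ssh.ParseKnownHosts(line)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(marker, hosts, parsed.Type()) // revoked [*.example.com] ssh-ed25519
}
```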
-type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. - wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != r.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. - if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(¶m); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. 
- w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. - if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := ed25519.PublicKey(w.KeyBytes) - - return (ed25519PublicKey)(key), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - edKey := (ed25519.PublicKey)(k) - if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// parseECDSA parses an ECDSA key 
according to RFC 5656, section 3.1. -func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - var hashFunc crypto.Hash - - switch key := s.pubKey.(type) { - case *rsaPublicKey, *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: s.pubKey.Type(), - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - return (ed25519PublicKey)(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. 
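`NewSignerFromSigner` is the hook for keys that only expose `crypto.Signer`, such as keys held in smart cards or HSMs. A sketch with an in-memory ECDSA key standing in for a hardware-backed one, which also exercises the ASN.1-to-SSH signature re-encoding done by `wrappedSigner.Sign`:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Stand-in for a hardware-backed crypto.Signer.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromSigner(key)
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("example payload")
	sig, err := signer.Sign(rand.Reader, data)
	if err != nil {
		log.Fatal(err)
	}
	// Round-trip through the PublicKey.Verify half of the interface.
	if err := signer.PublicKey().Verify(data, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println(signer.PublicKey().Type(), "signature verified")
}
```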
-func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
-	block, _ := pem.Decode(pemBytes)
-	if block == nil {
-		return nil, errors.New("ssh: no key found")
-	}
-
-	if encryptedBlock(block) {
-		return nil, errors.New("ssh: cannot decode encrypted private keys")
-	}
-
-	switch block.Type {
-	case "RSA PRIVATE KEY":
-		return x509.ParsePKCS1PrivateKey(block.Bytes)
-	case "EC PRIVATE KEY":
-		return x509.ParseECPrivateKey(block.Bytes)
-	case "DSA PRIVATE KEY":
-		return ParseDSAPrivateKey(block.Bytes)
-	case "OPENSSH PRIVATE KEY":
-		return parseOpenSSHPrivateKey(block.Bytes)
-	default:
-		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
-	}
-}
-
-// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with the
-// given passphrase from a PEM encoded private key. If the passphrase is
-// wrong, it returns x509.IncorrectPasswordError.
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) {
-	block, _ := pem.Decode(pemBytes)
-	if block == nil {
-		return nil, errors.New("ssh: no key found")
-	}
-	buf := block.Bytes
-
-	if encryptedBlock(block) {
-		if x509.IsEncryptedPEMBlock(block) {
-			var err error
-			buf, err = x509.DecryptPEMBlock(block, passPhrase)
-			if err != nil {
-				if err == x509.IncorrectPasswordError {
-					return nil, err
-				}
-				return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
-			}
-		}
-	}
-
-	switch block.Type {
-	case "RSA PRIVATE KEY":
-		return x509.ParsePKCS1PrivateKey(buf)
-	case "EC PRIVATE KEY":
-		return x509.ParseECPrivateKey(buf)
-	case "DSA PRIVATE KEY":
-		return ParseDSAPrivateKey(buf)
-	case "OPENSSH PRIVATE KEY":
-		return parseOpenSSHPrivateKey(buf)
-	default:
-		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
-	}
-}
-
-// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
-// specified by the OpenSSL DSA man page.
-func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { - magic := append([]byte("openssh-key-v1"), 0) - if !bytes.Equal(magic, key[0:len(magic)]) { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - - if w.KdfName != "none" || w.CipherName != "none" { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { - return nil, err - } - - if pk1.Check1 != pk1.Check2 { - return nil, errors.New("ssh: checkint mismatch") - } - - // we only handle ed25519 and rsa keys currently - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. 
-// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a06285..000000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. -type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index 08d281173..000000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,766 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. 
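For the two fingerprint helpers removed above (`FingerprintSHA256` and `FingerprintLegacyMD5`), a short usage sketch with a key generated on the fly:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ed25519"
	"golang.org/x/crypto/ssh"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	key, err := ssh.NewPublicKey(pub)
	if err != nil {
		log.Fatal(err)
	}
	// "SHA256:" plus unpadded base64, the OpenSSH >= 6.8 default.
	fmt.Println(ssh.FingerprintSHA256(key))
	// Colon-separated MD5 hex, the legacy presentation form.
	fmt.Println(ssh.FingerprintLegacyMD5(key))
}
```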
-const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
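The helpers above hand-roll the RFC 4251 `string` and `mpint` encodings. A standalone sketch of the same byte layout, restricted to non-negative mpints (the deleted `marshalInt` additionally handles the inverted two's-complement negative case):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// appendSSHString encodes s as a big-endian uint32 length followed by
// the raw bytes: the "string" type from RFC 4251, section 5.
func appendSSHString(out, s []byte) []byte {
	var l [4]byte
	binary.BigEndian.PutUint32(l[:], uint32(len(s)))
	return append(append(out, l[:]...), s...)
}

// appendMpint encodes a non-negative n as an SSH "mpint": minimal
// big-endian two's complement, padded with 0x00 when the high bit
// would otherwise make the value look negative. (Negative values,
// handled above via inversion, are omitted in this sketch.)
func appendMpint(out []byte, n *big.Int) []byte {
	b := n.Bytes()
	if len(b) > 0 && b[0]&0x80 != 0 {
		b = append([]byte{0x00}, b...)
	}
	return appendSSHString(out, b)
}

func main() {
	fmt.Printf("% x\n", appendSSHString(nil, []byte("ssh-rsa"))) // 00 00 00 07 73 73 68 2d 72 73 61
	fmt.Printf("% x\n", appendMpint(nil, big.NewInt(0x80)))      // 00 00 00 02 00 80
}
```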
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index f19016270..000000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. -type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. 
-func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. -func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. -func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. 
-func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return fmt.Errorf("ssh: invalid channel %d", id) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. -func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index b83d47388..000000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,582 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
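The global-request plumbing in the mux above is reachable through `ssh.Conn.SendRequest`. A hedged sketch of the common keepalive idiom built on it; the helper itself is hypothetical, and the request name follows the OpenSSH convention (servers that do not recognize it reply with a failure, which is still enough to prove liveness):

```go
package sshdemo

import (
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

// keepAlive sends a global request every interval and treats a
// transport error on the reply as a dead connection.
func keepAlive(conn ssh.Conn, interval time.Duration, done <-chan struct{}) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if _, _, err := conn.SendRequest("keepalive@openssh.com", true, nil); err != nil {
				log.Printf("keepalive failed: %v", err)
				return
			}
		case <-done:
			return
		}
	}
}
```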
- -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - NoClientAuth bool - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts are unlimited. If set to zero, the number of attempts are limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. 
- KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. 
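For orientation, here is a minimal server built on the public API whose internals follow. The host-key path and credentials are placeholders, and a real deployment would verify passwords in constant time against stored hashes:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ServerConfig{
		// Toy credential check for illustration only.
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "secret" {
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}

	keyBytes, err := ioutil.ReadFile("/path/to/host_key") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	hostKey, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	config.AddHostKey(hostKey)

	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		log.Fatal(err)
	}
	for {
		nConn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			// NewServerConn runs the handshake implemented below.
			sconn, chans, reqs, err := ssh.NewServerConn(c, config)
			if err != nil {
				log.Printf("handshake failed: %v", err)
				return
			}
			log.Printf("user %q authenticated", sconn.User())
			go ssh.DiscardRequests(reqs)
			for ch := range chans {
				ch.Reject(ssh.UnknownChannelType, "not implemented")
			}
		}(nConn)
	}
}
```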
-func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. - s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -// ServerAuthError implements the error interface. It appends any authentication -// errors that may occur, and is returned if all of the authentication methods -// provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. 
- Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := errors.New("no auth passed yet") - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil
&& candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - break - } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. 
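A sketch of a `PublicKeyCallback` written against the contract documented above: it matches exactly one authorized key and records the fingerprint in `Permissions.Extensions`, since the callback may run for keys that are never actually used to authenticate. `authorizedKey` is a placeholder, and `FingerprintSHA256` is assumed available in contemporary versions of the package:

```go
package sshauth

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// authorizedKey is a placeholder; it would normally be parsed from an
// authorized_keys entry with ssh.ParseAuthorizedKey.
var authorizedKey ssh.PublicKey

// publicKeyCallback accepts exactly one key. Data derived from the key
// goes into Permissions.Extensions rather than surrounding state,
// because the callback can run for keys that never sign anything.
func publicKeyCallback(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
	if authorizedKey != nil && bytes.Equal(key.Marshal(), authorizedKey.Marshal()) {
		return &ssh.Permissions{
			Extensions: map[string]string{
				"pubkey-fp": ssh.FingerprintSHA256(key),
			},
		}, nil
	}
	return nil, fmt.Errorf("unknown public key for %q", conn.User())
}
```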
-type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index d3321f6b7..000000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. 
-type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. -func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. 
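A usage sketch for the pty plumbing above, assuming an already-connected `*ssh.Client`; the mode opcodes are the exported RFC 4254 section 8 constants defined earlier in this file:

```go
package sshdemo

import (
	"os"

	"golang.org/x/crypto/ssh"
)

// startShell wires the session to the local terminal, requests a pty,
// and starts a remote login shell.
func startShell(client *ssh.Client) error {
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()

	session.Stdin = os.Stdin
	session.Stdout = os.Stdout
	session.Stderr = os.Stderr

	// Disable local echo and pin the line speeds.
	modes := ssh.TerminalModes{
		ssh.ECHO:          0,
		ssh.TTY_OP_ISPEED: 14400,
		ssh.TTY_OP_OSPEED: 14400,
	}
	if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
		return err
	}
	if err := session.Shell(); err != nil {
		return err
	}
	return session.Wait()
}
```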
-func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. 
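A sketch of the one-shot command pattern these methods support, assuming a connected `*ssh.Client`; it is essentially what `Output` does internally (set `Stdout`, then `Run`, which is `Start` followed by `Wait`):

```go
package sshdemo

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// uptime runs a single remote command and returns its stdout.
func uptime(client *ssh.Client) (string, error) {
	session, err := client.NewSession()
	if err != nil {
		return "", err
	}
	defer session.Close()

	var b bytes.Buffer
	session.Stdout = &b
	if err := session.Run("/usr/bin/uptime"); err != nil {
		return "", fmt.Errorf("run failed: %v", err)
	}
	return b.String(), nil
}
```

A non-zero exit status surfaces as an `*ssh.ExitError`, which can be type-asserted from the returned error to read the status and signal.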
-func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. - if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. 
-type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. 
-type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index a2dccc64c..000000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,115 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. 
- l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index acf17175d..000000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. -func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. 
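A reverse-tunnel sketch over `Client.Listen`, assuming a connected client and a hypothetical local backend address; as the documentation above warns, the returned listener must be serviced or the SSH connection may hang:

```go
package sshdemo

import (
	"io"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

// reverseTunnel asks the remote server to listen on remoteAddr and
// relays every accepted connection back to a local service.
func reverseTunnel(client *ssh.Client, remoteAddr, localAddr string) error {
	ln, err := client.Listen("tcp", remoteAddr)
	if err != nil {
		return err
	}
	defer ln.Close()

	for {
		remote, err := ln.Accept()
		if err != nil {
			return err
		}
		go func(remote net.Conn) {
			defer remote.Close()
			local, err := net.Dial("tcp", localAddr)
			if err != nil {
				log.Printf("dial local: %v", err)
				return
			}
			defer local.Close()
			// Pump bytes in both directions until either side closes.
			go io.Copy(local, remote)
			io.Copy(remote, local)
		}(remote)
	}
}
```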
-func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. -type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. -func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. 
- laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. - l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. 
- zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. -func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index 82da0d75c..000000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. 
-const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writePacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. -func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. 
- var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. - fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writePacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// generateKeys generates key material for IV, MAC and encryption. -func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv = make([]byte, cipherMode.ivSize) - key = make([]byte, cipherMode.keySize) - macKey = make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - return -} - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - iv, key, macKey := generateKeys(d, algs, kex) - - if algs.Cipher == gcmCipherID { - return newGCMCipher(iv, key) - } - - if algs.Cipher == aes128cbcID { - return newAESCBCCipher(iv, key, macKey, algs) - } - - if algs.Cipher == tripledescbcID { - return newTripleDESCBCCipher(iv, key, macKey, algs) - } - - c := &streamPacketCipher{ - mac: macModes[algs.MAC].new(macKey), - etm: macModes[algs.MAC].etm, - } - c.macResult = make([]byte, c.mac.Size()) - - var err error - c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv) - if err != nil { - return nil, err - } - - return c, nil -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. 
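-//
-// Concretely, per RFC 4253 section 7.2, with X the one-letter direction tag
-// ('A'..'F'):
-//
-//	K1  = HASH(K || H || X || session_id)
-//	Kn  = HASH(K || H || K1 || ... || K(n-1))
-//	key = K1 || K2 || ...   (truncated to len(out))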
-func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. - if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl deleted file mode 100755 index 73e26cafa..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.pl +++ /dev/null @@ -1,340 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This program reads a file containing function prototypes -# (like syscall_darwin.go) and generates system call bodies. -# The prototypes are marked by lines beginning with "//sys" -# and read like func declarations if //sys is replaced by func, but: -# * The parameter lists must give a name for each argument. 
-# This includes return parameters. -# * The parameter lists must give a type for each argument: -# the (x, y, z int) shorthand is not allowed. -# * If the return parameter is an error number, it must be named errno. - -# A line beginning with //sysnb is like //sys, except that the -# goroutine will not be suspended during the execution of the system -# call. This must only be used for system calls which can never -# block, as otherwise the system call could cause all goroutines to -# hang. - -use strict; - -my $cmdline = "mksyscall.pl " . join(' ', @ARGV); -my $errors = 0; -my $_32bit = ""; -my $plan9 = 0; -my $openbsd = 0; -my $netbsd = 0; -my $dragonfly = 0; -my $arm = 0; # 64-bit value should use (even, odd)-pair -my $tags = ""; # build tags - -if($ARGV[0] eq "-b32") { - $_32bit = "big-endian"; - shift; -} elsif($ARGV[0] eq "-l32") { - $_32bit = "little-endian"; - shift; -} -if($ARGV[0] eq "-plan9") { - $plan9 = 1; - shift; -} -if($ARGV[0] eq "-openbsd") { - $openbsd = 1; - shift; -} -if($ARGV[0] eq "-netbsd") { - $netbsd = 1; - shift; -} -if($ARGV[0] eq "-dragonfly") { - $dragonfly = 1; - shift; -} -if($ARGV[0] eq "-arm") { - $arm = 1; - shift; -} -if($ARGV[0] eq "-tags") { - shift; - $tags = $ARGV[0]; - shift; -} - -if($ARGV[0] =~ /^-/) { - print STDERR "usage: mksyscall.pl [-b32 | -l32] [-tags x,y] [file ...]\n"; - exit 1; -} - -# Check that we are using the new build system if we should -if($ENV{'GOOS'} eq "linux" && $ENV{'GOARCH'} ne "sparc64") { - if($ENV{'GOLANG_SYS_BUILD'} ne "docker") { - print STDERR "In the new build system, mksyscall should not be called directly.\n"; - print STDERR "See README.md\n"; - exit 1; - } -} - - -sub parseparamlist($) { - my ($list) = @_; - $list =~ s/^\s*//; - $list =~ s/\s*$//; - if($list eq "") { - return (); - } - return split(/\s*,\s*/, $list); -} - -sub parseparam($) { - my ($p) = @_; - if($p !~ /^(\S*) (\S*)$/) { - print STDERR "$ARGV:$.: malformed parameter: $p\n"; - $errors = 1; - return ("xx", "int"); - } - return ($1, $2); -} - -my $text = ""; -while(<>) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, errno error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # Try in vain to keep people from editing this file. - # The theory is that they jump into the middle of the file - # without reading the header. - $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - - # Go function header. - my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
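-	# For example, an illustrative prototype such as
-	#	//sys	read(fd int, p []byte) (n int, err error)
-	# produces a body that passes the slice as a pointer/length pair:
-	#	Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))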
- my @args = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass dummy pointer in that case. - # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). - $text .= "\tvar _p$n unsafe.Pointer\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; - $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; - $text .= "\n"; - push @args, "uintptr(_p$n)", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && ($openbsd || $netbsd)) { - push @args, "0"; - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $dragonfly) { - if ($func !~ /^extp(read|write)/i) { - push @args, "0"; - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $_32bit ne "") { - if(@args % 2 && $arm) { - # arm abi specifies 64-bit argument uses - # (even, odd) pair - push @args, "0" - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } - } else { - push @args, "uintptr($name)"; - } - } - - # Determine which form to use; pad args with zeros. - my $asm = "Syscall"; - if ($nonblock) { - if ($errvar ne "") { - $asm = "RawSyscall"; - } else { - $asm = "RawSyscallNoError"; - } - } else { - if ($errvar eq "") { - $asm = "SyscallNoError"; - } - } - if(@args <= 3) { - while(@args < 3) { - push @args, "0"; - } - } elsif(@args <= 6) { - $asm .= "6"; - while(@args < 6) { - push @args, "0"; - } - } elsif(@args <= 9) { - $asm .= "9"; - while(@args < 9) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # System call number. - if($sysname eq "") { - $sysname = "SYS_$func"; - $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar - $sysname =~ y/a-z/A-Z/; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm($sysname, $args)"; - - # Assign return values. 
- my $body = ""; - my @ret = ("_", "_", "_"); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err" && !$plan9) { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } elsif($name eq "err" && $plan9) { - $ret[0] = "r0"; - $ret[2] = "e1"; - next; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. - if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1" || $plan9) { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - if ($errvar ne "") { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } else { - $text .= "\t$ret[0], $ret[1] := $call\n"; - } - } - $text .= $body; - - if ($plan9 && $ret[2] eq "e1") { - $text .= "\tif int32(r0) == -1 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } elsif ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = errnoErr(e1)\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n\n"; -} - -chomp $text; -chomp $text; - -if($errors) { - exit 1; -} - -print <) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - $package = $1 if !$package && /^package (\S+)$/; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, err error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # So file name. - if($modname eq "") { - $modname = "libc"; - } - - # System call name. - if($sysname eq "") { - $sysname = "$func"; - } - - # System call pointer variable name. - my $sysvarname = "proc$sysname"; - - my $strconvfunc = "BytePtrFromString"; - my $strconvtype = "*byte"; - - $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. - - # Runtime import of function to allow cross-platform builds. - $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; - # Link symbol to proc address variable. - $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; - # Library proc address variable. - push @vars, $sysvarname; - - # Go function header. - $out = join(', ', @out); - if($out ne "") { - $out = " ($out)"; - } - if($text ne "") { - $text .= "\n" - } - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
- my @args = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, $errvar = $strconvfunc($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, _ = $strconvfunc($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass nil in that case. - $text .= "\tvar _p$n *$1\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && $_32bit ne "") { - if($_32bit eq "big-endian") { - push @args, "uintptr($name >> 32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name >> 32)"; - } - } elsif($type eq "bool") { - $text .= "\tvar _p$n uint32\n"; - $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; - push @args, "uintptr(_p$n)"; - $n++; - } else { - push @args, "uintptr($name)"; - } - } - my $nargs = @args; - - # Determine which form to use; pad args with zeros. - my $asm = "sysvicall6"; - if ($nonblock) { - $asm = "rawSysvicall6"; - } - if(@args <= 6) { - while(@args < 6) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)"; - - # Assign return values. - my $body = ""; - my $failexpr = ""; - my @ret = ("_", "_", "_"); - my @pout= (); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err") { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. 
- if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1") { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - $text .= $body; - - if ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n"; -} - -if($errors) { - exit 1; -} - -print < "net.inet", - "net.inet.ipproto" => "net.inet", - "net.inet6.ipv6proto" => "net.inet6", - "net.inet6.ipv6" => "net.inet6.ip6", - "net.inet.icmpv6" => "net.inet6.icmp6", - "net.inet6.divert6" => "net.inet6.divert", - "net.inet6.tcp6" => "net.inet.tcp", - "net.inet6.udp6" => "net.inet.udp", - "mpls" => "net.mpls", - "swpenc" => "vm.swapencrypt" -); - -# Node mappings -my %node_map = ( - "net.inet.ip.ifq" => "net.ifq", - "net.inet.pfsync" => "net.pfsync", - "net.mpls.ifq" => "net.ifq" -); - -my $ctlname; -my %mib = (); -my %sysctl = (); -my $node; - -sub debug() { - print STDERR "$_[0]\n" if $debug; -} - -# Walk the MIB and build a sysctl name to OID mapping. -sub build_sysctl() { - my ($node, $name, $oid) = @_; - my %node = %{$node}; - my @oid = @{$oid}; - - foreach my $key (sort keys %node) { - my @node = @{$node{$key}}; - my $nodename = $name.($name ne '' ? '.' : '').$key; - my @nodeoid = (@oid, $node[0]); - if ($node[1] eq 'CTLTYPE_NODE') { - if (exists $node_map{$nodename}) { - $node = \%mib; - $ctlname = $node_map{$nodename}; - foreach my $part (split /\./, $ctlname) { - $node = \%{@{$$node{$part}}[2]}; - } - } else { - $node = $node[2]; - } - &build_sysctl($node, $nodename, \@nodeoid); - } elsif ($node[1] ne '') { - $sysctl{$nodename} = \@nodeoid; - } - } -} - -foreach my $ctl (@ctls) { - $ctls{$ctl} = $ctl; -} - -# Build MIB -foreach my $header (@headers) { - &debug("Processing $header..."); - open HEADER, "/usr/include/$header" || - print STDERR "Failed to open $header\n"; - while (
<HEADER>
) { - if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || - $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || - $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { - if ($1 eq 'CTL_NAMES') { - # Top level. - $node = \%mib; - } else { - # Node. - my $nodename = lc($2); - if ($header =~ /^netinet\//) { - $ctlname = "net.inet.$nodename"; - } elsif ($header =~ /^netinet6\//) { - $ctlname = "net.inet6.$nodename"; - } elsif ($header =~ /^net\//) { - $ctlname = "net.$nodename"; - } else { - $ctlname = "$nodename"; - $ctlname =~ s/^(fs|net|kern)_/$1\./; - } - if (exists $ctl_map{$ctlname}) { - $ctlname = $ctl_map{$ctlname}; - } - if (not exists $ctls{$ctlname}) { - &debug("Ignoring $ctlname..."); - next; - } - - # Walk down from the top of the MIB. - $node = \%mib; - foreach my $part (split /\./, $ctlname) { - if (not exists $$node{$part}) { - &debug("Missing node $part"); - $$node{$part} = [ 0, '', {} ]; - } - $node = \%{@{$$node{$part}}[2]}; - } - } - - # Populate current node with entries. - my $i = -1; - while (defined($_) && $_ !~ /^}/) { - $_ =
<HEADER>
; - $i++ if $_ =~ /{.*}/; - next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; - $$node{$1} = [ $i, $2, {} ]; - } - } - } - close HEADER; -} - -&build_sysctl(\%mib, "", []); - -print <){ - if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){ - my $name = $1; - my $num = $2; - $name =~ y/a-z/A-Z/; - print " SYS_$name = $num;" - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print <){ - if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print <){ - if($line =~ /^(.*)\\$/) { - # Handle continuation - $line = $1; - $_ =~ s/^\s+//; - $line .= $_; - } else { - # New line - $line = $_; - } - next if $line =~ /\\$/; - if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) { - my $num = $1; - my $proto = $6; - my $compat = $8; - my $name = "$7_$9"; - - $name = "$7_$11" if $11 ne ''; - $name =~ y/a-z/A-Z/; - - if($compat eq '' || $compat eq '13' || $compat eq '30' || $compat eq '50') { - print " $name = $num; // $proto\n"; - } - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){ - my $num = $1; - my $proto = $3; - my $name = $4; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print < 0 { - return !contains(args, pkg) - } - return contains(strings.Split(*excludeList, ","), pkg) -} - -// TODO: -// - Better version handling. -// - Generate tables for the core unicode package? -// - Add generation for encodings. This requires some retooling here and there. -// - Running repo-wide "long" tests. - -var vprintf = fmt.Printf - -func main() { - gen.Init() - args = flag.Args() - if !*verbose { - // Set vprintf to a no-op. - vprintf = func(string, ...interface{}) (int, error) { return 0, nil } - } - - // TODO: create temporary cache directory to load files and create and set - // a "cache" option if the user did not specify the UNICODE_DIR environment - // variable. This will prevent duplicate downloads and also will enable long - // tests, which really need to be run after each generated package. - - updateCore := *doCore - if gen.UnicodeVersion() != unicode.Version { - fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n", - gen.UnicodeVersion(), - unicode.Version) - // TODO: use collate to compare. Simple comparison will work, though, - // until Unicode reaches version 10. To avoid circular dependencies, we - // could use the NumericWeighter without using package collate using a - // trivial Weighter implementation. 
- if gen.UnicodeVersion() < unicode.Version && !*force { - os.Exit(2) - } - updateCore = true - } - - var unicode = &dependency{} - if updateCore { - fmt.Printf("Updating core to version %s...\n", gen.UnicodeVersion()) - unicode = generate("unicode") - - // Test some users of the unicode packages, especially the ones that - // keep a mirrored table. These may need to be corrected by hand. - generate("regexp", unicode) - generate("strconv", unicode) // mimics Unicode table - generate("strings", unicode) - generate("testing", unicode) // mimics Unicode table - } - - var ( - cldr = generate("./unicode/cldr", unicode) - language = generate("./language", cldr) - internal = generate("./internal", unicode, language) - norm = generate("./unicode/norm", unicode) - rangetable = generate("./unicode/rangetable", unicode) - cases = generate("./cases", unicode, norm, language, rangetable) - width = generate("./width", unicode) - bidi = generate("./unicode/bidi", unicode, norm, rangetable) - mib = generate("./encoding/internal/identifier", unicode) - _ = generate("./encoding/htmlindex", unicode, language, mib) - _ = generate("./encoding/ianaindex", unicode, language, mib) - _ = generate("./secure/precis", unicode, norm, rangetable, cases, width, bidi) - _ = generate("./currency", unicode, cldr, language, internal) - _ = generate("./internal/number", unicode, cldr, language, internal) - _ = generate("./feature/plural", unicode, cldr, language, internal) - _ = generate("./internal/export/idna", unicode, bidi, norm) - _ = generate("./language/display", unicode, cldr, language, internal) - _ = generate("./collate", unicode, norm, cldr, language, rangetable) - _ = generate("./search", unicode, norm, cldr, language, rangetable) - ) - all.Wait() - - // Copy exported packages to the destination golang.org repo. - copyExported("golang.org/x/net/idna") - - if updateCore { - copyVendored() - } - - if hasErrors { - fmt.Println("FAIL") - os.Exit(1) - } - vprintf("SUCCESS\n") -} - -var ( - all sync.WaitGroup - hasErrors bool -) - -type dependency struct { - sync.WaitGroup - hasErrors bool -} - -func generate(pkg string, deps ...*dependency) *dependency { - var wg dependency - if exclude(pkg) { - return &wg - } - wg.Add(1) - all.Add(1) - go func() { - defer wg.Done() - defer all.Done() - // Wait for dependencies to finish. - for _, d := range deps { - d.Wait() - if d.hasErrors && !*force { - fmt.Printf("--- ABORT: %s\n", pkg) - wg.hasErrors = true - return - } - } - vprintf("=== GENERATE %s\n", pkg) - args := []string{"generate"} - if *verbose { - args = append(args, "-v") - } - args = append(args, pkg) - cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...) - w := &bytes.Buffer{} - cmd.Stderr = w - cmd.Stdout = w - if err := cmd.Run(); err != nil { - fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err) - hasErrors = true - wg.hasErrors = true - return - } - - vprintf("=== TEST %s\n", pkg) - args[0] = "test" - cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...) - wt := &bytes.Buffer{} - cmd.Stderr = wt - cmd.Stdout = wt - if err := cmd.Run(); err != nil { - fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err) - hasErrors = true - wg.hasErrors = true - return - } - vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w)) - fmt.Print(wt.String()) - }() - return &wg -} - -// copyExported copies a package in x/text/internal/export to the -// destination repository. 
-func copyExported(p string) { - copyPackage( - filepath.Join("internal", "export", path.Base(p)), - filepath.Join("..", filepath.FromSlash(p[len("golang.org/x"):])), - "golang.org/x/text/internal/export/"+path.Base(p), - p) -} - -// copyVendored copies packages used by Go core into the vendored directory. -func copyVendored() { - root := filepath.Join(build.Default.GOROOT, filepath.FromSlash("src/vendor/golang_org/x")) - - err := filepath.Walk(root, func(dir string, info os.FileInfo, err error) error { - if err != nil || !info.IsDir() || root == dir { - return err - } - src := dir[len(root)+1:] - const slash = string(filepath.Separator) - if c := strings.Split(src, slash); c[0] == "text" { - // Copy a text repo package from its normal location. - src = strings.Join(c[1:], slash) - } else { - // Copy the vendored package if it exists in the export directory. - src = filepath.Join("internal", "export", filepath.Base(src)) - } - copyPackage(src, dir, "golang.org", "golang_org") - return nil - }) - if err != nil { - fmt.Printf("Seeding directory %s has failed %v:", root, err) - os.Exit(1) - } -} - -// goGenRE is used to remove go:generate lines. -var goGenRE = regexp.MustCompile("//go:generate[^\n]*\n") - -// copyPackage copies relevant files from a directory in x/text to the -// destination package directory. The destination package is assumed to have -// the same name. For each copied file go:generate lines are removed and -// and package comments are rewritten to the new path. -func copyPackage(dirSrc, dirDst, search, replace string) { - err := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error { - base := filepath.Base(file) - if err != nil || info.IsDir() || - !strings.HasSuffix(base, ".go") || - strings.HasSuffix(base, "_test.go") && !strings.HasPrefix(base, "example") || - // Don't process subdirectories. - filepath.Dir(file) != dirSrc { - return nil - } - b, err := ioutil.ReadFile(file) - if err != nil || bytes.Contains(b, []byte("\n// +build ignore")) { - return err - } - // Fix paths. - b = bytes.Replace(b, []byte(search), []byte(replace), -1) - // Remove go:generate lines. - b = goGenRE.ReplaceAllLiteral(b, nil) - comment := "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n" - if *doCore { - comment = "// Code generated by running \"go run gen.go -core\" in golang.org/x/text. DO NOT EDIT.\n\n" - } - if !bytes.HasPrefix(b, []byte(comment)) { - b = append([]byte(comment), b...) - } - if b, err = format.Source(b); err != nil { - fmt.Println("Failed to format file:", err) - os.Exit(1) - } - file = filepath.Join(dirDst, base) - vprintf("=== COPY %s\n", file) - return ioutil.WriteFile(file, b, 0666) - }) - if err != nil { - fmt.Println("Copying exported files failed:", err) - os.Exit(1) - } -} - -func contains(a []string, s string) bool { - for _, e := range a { - if s == e { - return true - } - } - return false -} - -func indent(b *bytes.Buffer) string { - return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1) -} diff --git a/vendor/golang.org/x/text/secure/doc.go b/vendor/golang.org/x/text/secure/doc.go deleted file mode 100644 index e531c3543..000000000 --- a/vendor/golang.org/x/text/secure/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// secure is a repository of text security related packages. 
-package secure // import "golang.org/x/text/secure" diff --git a/vendor/golang.org/x/text/unicode/doc.go b/vendor/golang.org/x/text/unicode/doc.go deleted file mode 100644 index e8f1032d0..000000000 --- a/vendor/golang.org/x/text/unicode/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// unicode holds packages with implementations of Unicode standards that are -// mostly used as building blocks for other packages in golang.org/x/text, -// layout engines, or are otherwise more low-level in nature. -package unicode diff --git a/vendor/google.golang.org/genproto/regen.go b/vendor/google.golang.org/genproto/regen.go deleted file mode 100644 index 9c906f209..000000000 --- a/vendor/google.golang.org/genproto/regen.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -// Regen.go regenerates the genproto repository. -// -// Regen.go recursively walks through each directory named by given arguments, -// looking for all .proto files. (Symlinks are not followed.) -// If the pkg_prefix flag is not an empty string, -// any proto file without `go_package` option -// or whose option does not begin with the prefix is ignored. -// Protoc is executed on remaining files, -// one invocation per set of files declaring the same Go package. -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" -) - -var goPkgOptRe = regexp.MustCompile(`(?m)^option go_package = (.*);`) - -func usage() { - fmt.Fprintln(os.Stderr, `usage: go run regen.go -go_out=path/to/output [-pkg_prefix=pkg/prefix] roots... - -Most users will not need to run this file directly. 
-To regenerate this repository, run regen.sh instead.`) - flag.PrintDefaults() -} - -func main() { - goOutDir := flag.String("go_out", "", "go_out argument to pass to protoc-gen-go") - pkgPrefix := flag.String("pkg_prefix", "", "only include proto files with go_package starting with this prefix") - flag.Usage = usage - flag.Parse() - - if *goOutDir == "" { - log.Fatal("need go_out flag") - } - - pkgFiles := make(map[string][]string) - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() || !strings.HasSuffix(path, ".proto") { - return nil - } - pkg, err := goPkg(path) - if err != nil { - return err - } - pkgFiles[pkg] = append(pkgFiles[pkg], path) - return nil - } - for _, root := range flag.Args() { - if err := filepath.Walk(root, walkFn); err != nil { - log.Fatal(err) - } - } - for pkg, fnames := range pkgFiles { - if !strings.HasPrefix(pkg, *pkgPrefix) { - continue - } - if out, err := protoc(*goOutDir, flag.Args(), fnames); err != nil { - log.Fatalf("error executing protoc: %s\n%s", err, out) - } - } -} - -// goPkg reports the import path declared in the given file's -// `go_package` option. If the option is missing, goPkg returns empty string. -func goPkg(fname string) (string, error) { - content, err := ioutil.ReadFile(fname) - if err != nil { - return "", err - } - - var pkgName string - if match := goPkgOptRe.FindSubmatch(content); len(match) > 0 { - pn, err := strconv.Unquote(string(match[1])) - if err != nil { - return "", err - } - pkgName = pn - } - if p := strings.IndexRune(pkgName, ';'); p > 0 { - pkgName = pkgName[:p] - } - return pkgName, nil -} - -// protoc executes the "protoc" command on files named in fnames, -// passing go_out and include flags specified in goOut and includes respectively. -// protoc returns combined output from stdout and stderr. -func protoc(goOut string, includes, fnames []string) ([]byte, error) { - args := []string{"--go_out=plugins=grpc:" + goOut} - for _, inc := range includes { - args = append(args, "-I", inc) - } - args = append(args, fnames...) - return exec.Command("protoc", args...).CombinedOutput() -} diff --git a/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go b/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go deleted file mode 100644 index 3cf1ac858..000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains the generated server side code. -// It's only used for grpclb testing. - -package grpclb - -import ( - "google.golang.org/grpc" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" -) - -// Server API for LoadBalancer service - -type loadBalancerServer interface { - // Bidirectional rpc to get a list of servers. 
- BalanceLoad(*loadBalancerBalanceLoadServer) error -} - -func registerLoadBalancerServer(s *grpc.Server, srv loadBalancerServer) { - s.RegisterService( - &grpc.ServiceDesc{ - ServiceName: "grpc.lb.v1.LoadBalancer", - HandlerType: (*loadBalancerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "BalanceLoad", - Handler: balanceLoadHandler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpclb.proto", - }, srv) -} - -func balanceLoadHandler(srv interface{}, stream grpc.ServerStream) error { - return srv.(loadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream -} - -func (x *loadBalancerBalanceLoadServer) Send(m *lbpb.LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*lbpb.LoadBalanceRequest, error) { - m := new(lbpb.LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/gopkg.in/ns1/ns1-go.v2/doc.go b/vendor/gopkg.in/ns1/ns1-go.v2/doc.go deleted file mode 100644 index ec0218e19..000000000 --- a/vendor/gopkg.in/ns1/ns1-go.v2/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package ns1 is the NS1 golang SDK. -// -// To understand the REST models and terminology, -// please visit the ns1 web page: -// -// https://ns1.com/ -// -package ns1 diff --git a/vendor/gopkg.in/ns1/ns1-go.v2/rest/model/doc.go b/vendor/gopkg.in/ns1/ns1-go.v2/rest/model/doc.go deleted file mode 100644 index c61eb78c5..000000000 --- a/vendor/gopkg.in/ns1/ns1-go.v2/rest/model/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package model defines structures for interacting with the NS1 API. -package model diff --git a/vendor/gopkg.in/ns1/ns1-go.v2/rest/model/stat.go b/vendor/gopkg.in/ns1/ns1-go.v2/rest/model/stat.go deleted file mode 100644 index 3b319c2d8..000000000 --- a/vendor/gopkg.in/ns1/ns1-go.v2/rest/model/stat.go +++ /dev/null @@ -1,15 +0,0 @@ -package model - -// // GetQPSStats returns current queries per second (QPS) for the account -// func (c APIClient) GetQPSStats() (v float64, err error) { -// var s map[string]float64 -// _, err = c.doHTTPUnmarshal("GET", "https://api.nsone.net/v1/stats/qps", nil, &s) -// if err != nil { -// return v, err -// } -// v, found := s["qps"] -// if !found { -// return v, errors.New("Could not find 'qps' key in returned data") -// } -// return v, nil -// } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go new file mode 100644 index 000000000..a74526dae --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go @@ -0,0 +1,110 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/client-go/pkg/api" + unversioned "k8s.io/client-go/pkg/api/unversioned" + v1 "k8s.io/client-go/pkg/api/v1" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + labels "k8s.io/client-go/pkg/labels" + watch "k8s.io/client-go/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeThirdPartyResources implements ThirdPartyResourceInterface +type FakeThirdPartyResources struct { + Fake *FakeExtensionsV1beta1 +} + +var thirdpartyresourcesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "thirdpartyresources"} + +func (c *FakeThirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) + return err +} + +func (c *FakeThirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ThirdPartyResourceList{}) + return err +} + +func (c *FakeThirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(thirdpartyresourcesResource, opts), &v1beta1.ThirdPartyResourceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ThirdPartyResourceList{} + for _, item := range obj.(*v1beta1.ThirdPartyResourceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *FakeThirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(thirdpartyresourcesResource, opts)) +} + +// Patch applies the patch and returns the patched thirdPartyResource. +func (c *FakeThirdPartyResources) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(thirdpartyresourcesResource, name, data, subresources...), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go deleted file mode 100644 index 12331f6e6..000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/howeyc/gopass" - clientauth "k8s.io/client-go/tools/auth" -) - -// AuthLoaders are used to build clientauth.Info objects. -type AuthLoader interface { - // LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info - LoadAuth(path string) (*clientauth.Info, error) -} - -// default implementation of an AuthLoader -type defaultAuthLoader struct{} - -// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile -func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { - return clientauth.LoadFromFile(path) -} - -type PromptingAuthLoader struct { - reader io.Reader -} - -// LoadAuth parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. -func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { - // Prompt for user/pass and write a file if none exists. - if _, err := os.Stat(path); os.IsNotExist(err) { - authPtr, err := a.Prompt() - auth := *authPtr - if err != nil { - return nil, err - } - data, err := json.Marshal(auth) - if err != nil { - return &auth, err - } - err = ioutil.WriteFile(path, data, 0600) - return &auth, err - } - authPtr, err := clientauth.LoadFromFile(path) - if err != nil { - return nil, err - } - return authPtr, nil -} - -// Prompt pulls the user and password from a reader -func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) { - var err error - auth := &clientauth.Info{} - auth.User, err = promptForString("Username", a.reader, true) - if err != nil { - return nil, err - } - auth.Password, err = promptForString("Password", nil, false) - if err != nil { - return nil, err - } - return auth, nil -} - -func promptForString(field string, r io.Reader, show bool) (result string, err error) { - fmt.Printf("Please enter %s: ", field) - if show { - _, err = fmt.Fscan(r, &result) - } else { - var data []byte - data, err = gopass.GetPasswdMasked() - result = string(data) - } - return result, err -} - -// NewPromptingAuthLoader is an AuthLoader that parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. 
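-//
-// A minimal usage sketch (the reader and path are placeholders):
-//
-//	loader := NewPromptingAuthLoader(os.Stdin)
-//	info, err := loader.LoadAuth("/path/to/auth.json")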
-func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader {
-	return &PromptingAuthLoader{reader}
-}
-
-// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file
-func NewDefaultAuthLoader() AuthLoader {
-	return &defaultAuthLoader{}
-}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
deleted file mode 100644
index 61bda9f6b..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
+++ /dev/null
@@ -1,501 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clientcmd
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/url"
-	"os"
-	"strings"
-
-	"github.com/golang/glog"
-	"github.com/imdario/mergo"
-
-	"strconv"
-	"time"
-
-	"k8s.io/client-go/pkg/api"
-	"k8s.io/client-go/rest"
-	clientauth "k8s.io/client-go/tools/auth"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-var (
-	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
-	// DEPRECATED will be replaced
-	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
-	// DefaultClientConfig represents the legacy behavior of this package for defaulting
-	// DEPRECATED will be replaced
-	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
-		ClusterDefaults: ClusterDefaults,
-	}, nil, NewDefaultClientConfigLoadingRules()}
-)
-
-// getDefaultServer returns a default setting for DefaultClientConfig
-// DEPRECATED
-func getDefaultServer() string {
-	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
-		return server
-	}
-	return "http://localhost:8080"
-}
-
-// ClientConfig is used to make it easy to get an api server client
-type ClientConfig interface {
-	// RawConfig returns the merged result of all overrides
-	RawConfig() (clientcmdapi.Config, error)
-	// ClientConfig returns a complete client config
-	ClientConfig() (*rest.Config, error)
-	// Namespace returns the namespace resulting from the merged
-	// result of all overrides and a boolean indicating if it was
-	// overridden
-	Namespace() (string, bool, error)
-	// ConfigAccess returns the rules for loading/persisting the config.
-	ConfigAccess() ConfigAccess
-}
-
-type PersistAuthProviderConfigForUser func(user string) rest.AuthProviderConfigPersister
-
-// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
-type DirectClientConfig struct {
-	config         clientcmdapi.Config
-	contextName    string
-	overrides      *ConfigOverrides
-	fallbackReader io.Reader
-	configAccess   ConfigAccess
-}
-
-// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name
-func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig {
-	return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules()}
-}
-
-// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
-func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig {
-	return &DirectClientConfig{config, contextName, overrides, nil, configAccess}
-}
-
-// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags
-func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig {
-	return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess}
-}
-
-func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
-	return config.config, nil
-}
-
-// ClientConfig implements ClientConfig
-func (config *DirectClientConfig) ClientConfig() (*rest.Config, error) {
-	// check that getAuthInfo, getContext, and getCluster do not return an error.
-	// Do this before checking if the current config is usable in the event that an
-	// AuthInfo, Context, or Cluster config with user-defined names is not found.
-	// This provides a user with the immediate cause for error if one is found
-	configAuthInfo, err := config.getAuthInfo()
-	if err != nil {
-		return nil, err
-	}
-
-	_, err = config.getContext()
-	if err != nil {
-		return nil, err
-	}
-
-	configClusterInfo, err := config.getCluster()
-	if err != nil {
-		return nil, err
-	}
-
-	if err := config.ConfirmUsable(); err != nil {
-		return nil, err
-	}
-
-	clientConfig := &rest.Config{}
-	clientConfig.Host = configClusterInfo.Server
-
-	if len(config.overrides.Timeout) > 0 {
-		if i, err := strconv.ParseInt(config.overrides.Timeout, 10, 64); err == nil && i >= 0 {
-			clientConfig.Timeout = time.Duration(i) * time.Second
-		} else if requestTimeout, err := time.ParseDuration(config.overrides.Timeout); err == nil {
-			clientConfig.Timeout = requestTimeout
-		} else {
-			return nil, fmt.Errorf("Invalid value for option '--request-timeout'. Value must be a single integer, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)")
-		}
-	}
-
-	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
-		u.RawQuery = ""
-		u.Fragment = ""
-		clientConfig.Host = u.String()
-	}
-	if len(configAuthInfo.Impersonate) > 0 {
-		clientConfig.Impersonate = configAuthInfo.Impersonate
-	}
-
-	// only try to read the auth information if we are secure
-	if rest.IsConfigTransportTLS(*clientConfig) {
-		var err error
-
-		// mergo is first-write-wins for map values and last-write-wins for interface values
-		// NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a.
-		// Our mergo.Merge version is older than this change.
-		var persister rest.AuthProviderConfigPersister
-		if config.configAccess != nil {
-			authInfoName, _ := config.getAuthInfoName()
-			persister = PersisterForUser(config.configAccess, authInfoName)
-		}
-		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister)
-		if err != nil {
-			return nil, err
-		}
-		mergo.Merge(clientConfig, userAuthPartialConfig)
-
-		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
-		if err != nil {
-			return nil, err
-		}
-		mergo.Merge(clientConfig, serverAuthPartialConfig)
-	}
-
-	return clientConfig, nil
-}
-
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately
-// we want this order of precedence for the server identification
-// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*rest.Config, error) {
-	mergedConfig := &rest.Config{}
-
-	// configClusterInfo holds the information identifying the server provided by .kubeconfig
-	configClientConfig := &rest.Config{}
-	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
-	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
-	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
-	mergo.Merge(mergedConfig, configClientConfig)
-
-	return mergedConfig, nil
-}
-
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately
-// we want this order of precedence for user identification
-// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
-// 4.
if there is not enough information to identify the user, prompt if possible -func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig rest.AuthProviderConfigPersister) (*rest.Config, error) { - mergedConfig := &rest.Config{} - - // blindly overwrite existing values based on precedence - if len(configAuthInfo.Token) > 0 { - mergedConfig.BearerToken = configAuthInfo.Token - } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) - if err != nil { - return nil, err - } - mergedConfig.BearerToken = string(tokenBytes) - } - if len(configAuthInfo.Impersonate) > 0 { - mergedConfig.Impersonate = configAuthInfo.Impersonate - } - if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { - mergedConfig.CertFile = configAuthInfo.ClientCertificate - mergedConfig.CertData = configAuthInfo.ClientCertificateData - mergedConfig.KeyFile = configAuthInfo.ClientKey - mergedConfig.KeyData = configAuthInfo.ClientKeyData - } - if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { - mergedConfig.Username = configAuthInfo.Username - mergedConfig.Password = configAuthInfo.Password - } - if configAuthInfo.AuthProvider != nil { - mergedConfig.AuthProvider = configAuthInfo.AuthProvider - mergedConfig.AuthConfigPersister = persistAuthConfig - } - - // if there still isn't enough information to authenticate the user, try prompting - if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) { - prompter := NewPromptingAuthLoader(fallbackReader) - promptedAuthInfo, err := prompter.Prompt() - if err != nil { - return nil, err - } - promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo) - previouslyMergedConfig := mergedConfig - mergedConfig = &rest.Config{} - mergo.Merge(mergedConfig, promptedConfig) - mergo.Merge(mergedConfig, previouslyMergedConfig) - } - - return mergedConfig, nil -} - -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information -func makeUserIdentificationConfig(info clientauth.Info) *rest.Config { - config := &rest.Config{} - config.Username = info.User - config.Password = info.Password - config.CertFile = info.CertFile - config.KeyFile = info.KeyFile - config.BearerToken = info.BearerToken - return config -} - -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information -func makeServerIdentificationConfig(info clientauth.Info) rest.Config { - config := rest.Config{} - config.CAFile = info.CAFile - if info.Insecure != nil { - config.Insecure = *info.Insecure - } - return config -} - -func canIdentifyUser(config rest.Config) bool { - return len(config.Username) > 0 || - (len(config.CertFile) > 0 || len(config.CertData) > 0) || - len(config.BearerToken) > 0 || - config.AuthProvider != nil -} - -// Namespace implements ClientConfig -func (config *DirectClientConfig) Namespace() (string, bool, error) { - if err := config.ConfirmUsable(); err != nil { - return "", false, err - } - - configContext, err := config.getContext() - if err != nil { - return "", false, err - } - - if len(configContext.Namespace) == 0 { - return api.NamespaceDefault, false, nil - } - - overridden := false - if config.overrides != nil && config.overrides.Context.Namespace != "" { - overridden = true - } - return configContext.Namespace, overridden, nil -} - -// ConfigAccess implements 
ClientConfig
-func (config *DirectClientConfig) ConfigAccess() ConfigAccess {
-	return config.configAccess
-}
-
-// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
-// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
-func (config *DirectClientConfig) ConfirmUsable() error {
-	validationErrors := make([]error, 0)
-
-	var contextName string
-	if len(config.contextName) != 0 {
-		contextName = config.contextName
-	} else {
-		contextName = config.config.CurrentContext
-	}
-
-	if len(contextName) > 0 {
-		_, exists := config.config.Contexts[contextName]
-		if !exists {
-			validationErrors = append(validationErrors, &errContextNotFound{contextName})
-		}
-	}
-
-	authInfoName, _ := config.getAuthInfoName()
-	authInfo, _ := config.getAuthInfo()
-	validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...)
-	clusterName, _ := config.getClusterName()
-	cluster, _ := config.getCluster()
-	validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...)
-	// when direct client config is specified, and our only error is that no server is defined, we should
-	// return a standard "no config" error
-	if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster {
-		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
-	}
-	return newErrConfigurationInvalid(validationErrors)
-}
-
-// getContextName returns the default, or user-set context name, and a boolean that indicates
-// whether the default context name has been overwritten by a user-set flag, or left as its default value
-func (config *DirectClientConfig) getContextName() (string, bool) {
-	if len(config.overrides.CurrentContext) != 0 {
-		return config.overrides.CurrentContext, true
-	}
-	if len(config.contextName) != 0 {
-		return config.contextName, false
-	}
-
-	return config.config.CurrentContext, false
-}
-
-// getAuthInfoName returns a string containing the current authinfo name for the current context,
-// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or
-// left as its default value
-func (config *DirectClientConfig) getAuthInfoName() (string, bool) {
-	if len(config.overrides.Context.AuthInfo) != 0 {
-		return config.overrides.Context.AuthInfo, true
-	}
-	context, _ := config.getContext()
-	return context.AuthInfo, false
-}
-
-// getClusterName returns a string containing the default, or user-set cluster name, and a boolean
-// indicating whether the default clusterName has been overwritten by a user-set flag, or left as
-// its default value
-func (config *DirectClientConfig) getClusterName() (string, bool) {
-	if len(config.overrides.Context.Cluster) != 0 {
-		return config.overrides.Context.Cluster, true
-	}
-	context, _ := config.getContext()
-	return context.Cluster, false
-}
-
-// getContext returns the clientcmdapi.Context, or an error if a required context is not found.
-func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { - contexts := config.config.Contexts - contextName, required := config.getContextName() - - var mergedContext clientcmdapi.Context - if configContext, exists := contexts[contextName]; exists { - mergo.Merge(&mergedContext, configContext) - } else if required { - return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) - } - mergo.Merge(&mergedContext, config.overrides.Context) - - return mergedContext, nil -} - -// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found. -func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { - authInfos := config.config.AuthInfos - authInfoName, required := config.getAuthInfoName() - - var mergedAuthInfo clientcmdapi.AuthInfo - if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(&mergedAuthInfo, configAuthInfo) - } else if required { - return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) - } - mergo.Merge(&mergedAuthInfo, config.overrides.AuthInfo) - - return mergedAuthInfo, nil -} - -// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found. -func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { - clusterInfos := config.config.Clusters - clusterInfoName, required := config.getClusterName() - - var mergedClusterInfo clientcmdapi.Cluster - mergo.Merge(&mergedClusterInfo, config.overrides.ClusterDefaults) - if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(&mergedClusterInfo, configClusterInfo) - } else if required { - return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) - } - mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo) - // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data - // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" - caLen := len(config.overrides.ClusterInfo.CertificateAuthority) - caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) - if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 { - mergedClusterInfo.CertificateAuthority = "" - mergedClusterInfo.CertificateAuthorityData = nil - } - - return mergedClusterInfo, nil -} - -// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. -type inClusterClientConfig struct{} - -var _ ClientConfig = inClusterClientConfig{} - -func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { - return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") -} - -func (inClusterClientConfig) ClientConfig() (*rest.Config, error) { - return rest.InClusterConfig() -} - -func (inClusterClientConfig) Namespace() (string, bool, error) { - // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. 
- // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up - if ns := os.Getenv("POD_NAMESPACE"); ns != "" { - return ns, true, nil - } - - // Fall back to the namespace associated with the service account token, if available - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, true, nil - } - } - - return "default", false, nil -} - -func (inClusterClientConfig) ConfigAccess() ConfigAccess { - return NewDefaultClientConfigLoadingRules() -} - -// Possible returns true if loading an inside-kubernetes-cluster is possible. -func (inClusterClientConfig) Possible() bool { - fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") - return os.Getenv("KUBERNETES_SERVICE_HOST") != "" && - os.Getenv("KUBERNETES_SERVICE_PORT") != "" && - err == nil && !fi.IsDir() -} - -// BuildConfigFromFlags is a helper function that builds configs from a master -// url or a kubeconfig filepath. These are passed in as command line flags for cluster -// components. Warnings should reflect this usage. If neither masterUrl or kubeconfigPath -// are passed in we fallback to inClusterConfig. If inClusterConfig fails, we fallback -// to the default config. -func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*rest.Config, error) { - if kubeconfigPath == "" && masterUrl == "" { - glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") - kubeconfig, err := rest.InClusterConfig() - if err == nil { - return kubeconfig, nil - } - glog.Warning("error creating inClusterConfig, falling back to default config: ", err) - } - return NewNonInteractiveDeferredLoadingClientConfig( - &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig() -} - -// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master -// url and a kubeconfigGetter. -func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*rest.Config, error) { - // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here. - cc := NewNonInteractiveDeferredLoadingClientConfig( - &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter}, - &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}) - return cc.ClientConfig() -} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go deleted file mode 100644 index 3e5aa49a3..000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package clientcmd
-
-import (
-	"errors"
-	"os"
-	"path"
-	"path/filepath"
-	"reflect"
-	"sort"
-
-	"github.com/golang/glog"
-
-	"k8s.io/client-go/rest"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files
-type ConfigAccess interface {
-	// GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config
-	GetLoadingPrecedence() []string
-	// GetStartingConfig returns the config that subcommands should be operating against. It may or may not be merged depending on loading rules
-	GetStartingConfig() (*clientcmdapi.Config, error)
-	// GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one.
-	GetDefaultFilename() string
-	// IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more
-	IsExplicitFile() bool
-	// GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more
-	GetExplicitFile() string
-}
-
-type PathOptions struct {
-	// GlobalFile is the full path to the file to load as the global (final) option
-	GlobalFile string
-	// EnvVar is the env var name that points to the list of kubeconfig files to load
-	EnvVar string
-	// ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file
-	ExplicitFileFlag string
-
-	// GlobalFileSubpath is an optional value used for displaying help
-	GlobalFileSubpath string
-
-	LoadingRules *ClientConfigLoadingRules
-}
-
-func (o *PathOptions) GetEnvVarFiles() []string {
-	if len(o.EnvVar) == 0 {
-		return []string{}
-	}
-
-	envVarValue := os.Getenv(o.EnvVar)
-	if len(envVarValue) == 0 {
-		return []string{}
-	}
-
-	return filepath.SplitList(envVarValue)
-}
-
-func (o *PathOptions) GetLoadingPrecedence() []string {
-	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
-		return envVarFiles
-	}
-
-	return []string{o.GlobalFile}
-}
-
-func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) {
-	// don't mutate the original
-	loadingRules := *o.LoadingRules
-	loadingRules.Precedence = o.GetLoadingPrecedence()
-
-	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{})
-	rawConfig, err := clientConfig.RawConfig()
-	if os.IsNotExist(err) {
-		return clientcmdapi.NewConfig(), nil
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	return &rawConfig, nil
-}
-
-func (o *PathOptions) GetDefaultFilename() string {
-	if o.IsExplicitFile() {
-		return o.GetExplicitFile()
-	}
-
-	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
-		if len(envVarFiles) == 1 {
-			return envVarFiles[0]
-		}
-
-		// if any of the envvar files already exists, return it
-		for _, envVarFile := range envVarFiles {
-			if _, err := os.Stat(envVarFile); err == nil {
-				return envVarFile
-			}
-		}
-
-		// otherwise, return the last one in the list
-		return envVarFiles[len(envVarFiles)-1]
-	}
-
-	return o.GlobalFile
-}
-
-func (o *PathOptions) IsExplicitFile() bool {
-	if len(o.LoadingRules.ExplicitPath) > 0 {
-		return true
-	}
-
-	return false
-}
-
-func (o *PathOptions) GetExplicitFile() string {
-	return o.LoadingRules.ExplicitPath
-}
-
-func NewDefaultPathOptions() *PathOptions {
-	ret := &PathOptions{
-		GlobalFile:       RecommendedHomeFile,
-		EnvVar:           RecommendedConfigPathEnvVar,
-		ExplicitFileFlag: RecommendedConfigPathFlag,
-
-		GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName),
-
-		LoadingRules: NewDefaultClientConfigLoadingRules(),
-	}
-	ret.LoadingRules.DoNotResolvePaths = true
-
-	return ret
-}
-
-// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or
-// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow.
-// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values
-// (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference,
-// which means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any
-// modified element.
-func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
-	possibleSources := configAccess.GetLoadingPrecedence()
-	// sort the possible kubeconfig files so we always "lock" in the same order
-	// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
-	sort.Strings(possibleSources)
-	for _, filename := range possibleSources {
-		if err := lockFile(filename); err != nil {
-			return err
-		}
-		defer unlockFile(filename)
-	}
-
-	startingConfig, err := configAccess.GetStartingConfig()
-	if err != nil {
-		return err
-	}
-
-	// We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file.
-	// Special case the test for current context and preferences since those always write to the default file.
-	if reflect.DeepEqual(*startingConfig, newConfig) {
-		// nothing to do
-		return nil
-	}
-
-	if startingConfig.CurrentContext != newConfig.CurrentContext {
-		if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil {
-			return err
-		}
-	}
-
-	if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) {
-		if err := writePreferences(configAccess, newConfig.Preferences); err != nil {
-			return err
-		}
-	}
-
-	// Search every cluster, authInfo, and context.
First from new to old for differences, then from old to new for deletions - for key, cluster := range newConfig.Clusters { - startingCluster, exists := startingConfig.Clusters[key] - if !reflect.DeepEqual(cluster, startingCluster) || !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *cluster - - configToWrite.Clusters[key] = &t - configToWrite.Clusters[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range newConfig.Contexts { - startingContext, exists := startingConfig.Contexts[key] - if !reflect.DeepEqual(context, startingContext) || !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - configToWrite.Contexts[key] = context - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range newConfig.AuthInfos { - startingAuthInfo, exists := startingConfig.AuthInfos[key] - if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *authInfo - configToWrite.AuthInfos[key] = &t - configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, cluster := range startingConfig.Clusters { - if _, exists := newConfig.Clusters[key]; !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Clusters, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range startingConfig.Contexts { - if _, exists := newConfig.Contexts[key]; !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Contexts, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range startingConfig.AuthInfos { - if _, exists := newConfig.AuthInfos[key]; !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.AuthInfos, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - 
} - } - - return nil -} - -func PersisterForUser(configAccess ConfigAccess, user string) rest.AuthProviderConfigPersister { - return &persister{configAccess, user} -} - -type persister struct { - configAccess ConfigAccess - user string -} - -func (p *persister) Persist(config map[string]string) error { - newConfig, err := p.configAccess.GetStartingConfig() - if err != nil { - return err - } - authInfo, ok := newConfig.AuthInfos[p.user] - if ok && authInfo.AuthProvider != nil { - authInfo.AuthProvider.Config = config - ModifyConfig(p.configAccess, *newConfig, false) - } - return nil -} - -// writeCurrentContext takes three possible paths. -// If newCurrentContext is the same as the startingConfig's current context, then we exit. -// If newCurrentContext has a value, then that value is written into the default destination file. -// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file -func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if startingConfig.CurrentContext == newCurrentContext { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - if len(newCurrentContext) > 0 { - destinationFile := configAccess.GetDefaultFilename() - config, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - config.CurrentContext = newCurrentContext - - if err := WriteToFile(*config, destinationFile); err != nil { - return err - } - - return nil - } - - // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it - for _, file := range configAccess.GetLoadingPrecedence() { - if _, err := os.Stat(file); err == nil { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if len(currConfig.CurrentContext) > 0 { - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - } - - return errors.New("no config found to write context") -} - -func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - for _, file := range configAccess.GetLoadingPrecedence() { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - - return errors.New("no config found to write preferences") -} - -// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. 
-func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { - config, err := LoadFromFile(filename) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if config == nil { - config = clientcmdapi.NewConfig() - } - return config, nil -} - -// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit -func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { - config, err := getConfigFromFile(filename) - if err != nil { - glog.FatalDepth(1, err) - } - - return config -} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/doc.go deleted file mode 100644 index a4cfbd13c..000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/doc.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package clientcmd provides one stop shopping for building a working client from a fixed config, -from a .kubeconfig file, from command line flags, or from any merged combination. - -Sample usage from merged .kubeconfig files (local directory, home directory) - - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - // if you want to change the loading rules (which files in which order), you can do so here - - configOverrides := &clientcmd.ConfigOverrides{} - // if you want to change override values or bind them to flags, there are methods to help you - - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - config, err := kubeConfig.ClientConfig() - if err != nil { - // Do something - } - client, err := unversioned.New(config) - // ... -*/ -package clientcmd // import "k8s.io/client-go/tools/clientcmd" diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go deleted file mode 100644 index d678e7fa2..000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ /dev/null @@ -1,609 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package clientcmd
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-	"reflect"
-	goruntime "runtime"
-	"strings"
-
-	"github.com/golang/glog"
-	"github.com/imdario/mergo"
-
-	"k8s.io/client-go/pkg/api/unversioned"
-	"k8s.io/client-go/pkg/runtime"
-	utilerrors "k8s.io/client-go/pkg/util/errors"
-	"k8s.io/client-go/pkg/util/homedir"
-	"k8s.io/client-go/rest"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
-)
-
-const (
-	RecommendedConfigPathFlag   = "kubeconfig"
-	RecommendedConfigPathEnvVar = "KUBECONFIG"
-	RecommendedHomeDir          = ".kube"
-	RecommendedFileName         = "config"
-	RecommendedSchemaName       = "schema"
-)
-
-var RecommendedHomeFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedFileName)
-var RecommendedSchemaFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedSchemaName)
-
-// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions.
-// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make
-// sure existing config files are migrated to their new locations properly.
-func currentMigrationRules() map[string]string {
-	oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig")
-	oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName)
-
-	migrationRules := map[string]string{}
-	migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile
-	if goruntime.GOOS == "windows" {
-		migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile
-	}
-	return migrationRules
-}
-
-type ClientConfigLoader interface {
-	ConfigAccess
-	// IsDefaultConfig returns true if the returned config matches the defaults.
-	IsDefaultConfig(*rest.Config) bool
-	// Load returns the latest config
-	Load() (*clientcmdapi.Config, error)
-}
-
-type KubeconfigGetter func() (*clientcmdapi.Config, error)
-
-type ClientConfigGetter struct {
-	kubeconfigGetter KubeconfigGetter
-}
-
-// ClientConfigGetter implements the ClientConfigLoader interface.
-var _ ClientConfigLoader = &ClientConfigGetter{}
-
-func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) {
-	return g.kubeconfigGetter()
-}
-
-func (g *ClientConfigGetter) GetLoadingPrecedence() []string {
-	return nil
-}
-func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) {
-	return g.kubeconfigGetter()
-}
-func (g *ClientConfigGetter) GetDefaultFilename() string {
-	return ""
-}
-func (g *ClientConfigGetter) IsExplicitFile() bool {
-	return false
-}
-func (g *ClientConfigGetter) GetExplicitFile() string {
-	return ""
-}
-func (g *ClientConfigGetter) IsDefaultConfig(config *rest.Config) bool {
-	return false
-}
-
-// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
-// Callers can put the chain together however they want, but we'd recommend:
-// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
-// ExplicitPath is special, because if a user specifically requests a certain file be used, an error is reported if this file is not present
-type ClientConfigLoadingRules struct {
-	ExplicitPath string
-	Precedence   []string
-
-	// MigrationRules is a map of destination files to source files. If a destination file is not present, then the source file is checked.
- // If the source file is present, then it is copied to the destination file BEFORE any further loading happens. - MigrationRules map[string]string - - // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so - // that a default object that doesn't set this will usually get the behavior it wants. - DoNotResolvePaths bool - - // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. - // This should match the overrides passed in to ClientConfig loader. - DefaultClientConfig ClientConfig -} - -// ClientConfigLoadingRules implements the ClientConfigLoader interface. -var _ ClientConfigLoader = &ClientConfigLoadingRules{} - -// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to -// use this constructor -func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { - chain := []string{} - - envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) - if len(envVarFiles) != 0 { - chain = append(chain, filepath.SplitList(envVarFiles)...) - - } else { - chain = append(chain, RecommendedHomeFile) - } - - return &ClientConfigLoadingRules{ - Precedence: chain, - MigrationRules: currentMigrationRules(), - } -} - -// Load starts by running the MigrationRules and then -// takes the loading rules and returns a Config object based on following rules. -// if the ExplicitPath, return the unmerged explicit file -// Otherwise, return a merged config based on the Precedence slice -// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. -// Read errors or files with non-deserializable content produce errors. -// The first file to set a particular map key wins and map key's value is never changed. -// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed. -// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two. -// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even -// non-conflicting entries from the second file's "red-user" are discarded. -// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder -// and only absolute file paths are returned. -func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { - if err := rules.Migrate(); err != nil { - return nil, err - } - - errlist := []error{} - - kubeConfigFiles := []string{} - - // Make sure a file we were explicitly told to use exists - if len(rules.ExplicitPath) > 0 { - if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) { - return nil, err - } - kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath) - - } else { - kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) 
- } - - kubeconfigs := []*clientcmdapi.Config{} - // read and cache the config files so that we only look at them once - for _, filename := range kubeConfigFiles { - if len(filename) == 0 { - // no work to do - continue - } - - config, err := LoadFromFile(filename) - if os.IsNotExist(err) { - // skip missing files - continue - } - if err != nil { - errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err)) - continue - } - - kubeconfigs = append(kubeconfigs, config) - } - - // first merge all of our maps - mapConfig := clientcmdapi.NewConfig() - - for _, kubeconfig := range kubeconfigs { - mergo.Merge(mapConfig, kubeconfig) - } - - // merge all of the struct values in the reverse order so that priority is given correctly - // errors are not added to the list the second time - nonMapConfig := clientcmdapi.NewConfig() - for i := len(kubeconfigs) - 1; i >= 0; i-- { - kubeconfig := kubeconfigs[i] - mergo.Merge(nonMapConfig, kubeconfig) - } - - // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and - // get the values we expect. - config := clientcmdapi.NewConfig() - mergo.Merge(config, mapConfig) - mergo.Merge(config, nonMapConfig) - - if rules.ResolvePaths() { - if err := ResolveLocalPaths(config); err != nil { - errlist = append(errlist, err) - } - } - return config, utilerrors.NewAggregate(errlist) -} - -// Migrate uses the MigrationRules map. If a destination file is not present, then the source file is checked. -// If the source file is present, then it is copied to the destination file BEFORE any further loading happens. -func (rules *ClientConfigLoadingRules) Migrate() error { - if rules.MigrationRules == nil { - return nil - } - - for destination, source := range rules.MigrationRules { - if _, err := os.Stat(destination); err == nil { - // if the destination already exists, do nothing - continue - } else if os.IsPermission(err) { - // if we can't access the file, skip it - continue - } else if !os.IsNotExist(err) { - // if we had an error other than non-existence, fail - return err - } - - if sourceInfo, err := os.Stat(source); err != nil { - if os.IsNotExist(err) || os.IsPermission(err) { - // if the source file doesn't exist or we can't access it, there's no work to do. - continue - } - - // if we had an error other than non-existence, fail - return err - } else if sourceInfo.IsDir() { - return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) - } - - in, err := os.Open(source) - if err != nil { - return err - } - defer in.Close() - out, err := os.Create(destination) - if err != nil { - return err - } - defer out.Close() - - if _, err = io.Copy(out, in); err != nil { - return err - } - } - - return nil -} - -// GetLoadingPrecedence implements ConfigAccess -func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { - return rules.Precedence -} - -// GetStartingConfig implements ConfigAccess -func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) { - clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{}) - rawConfig, err := clientConfig.RawConfig() - if os.IsNotExist(err) { - return clientcmdapi.NewConfig(), nil - } - if err != nil { - return nil, err - } - - return &rawConfig, nil -} - -// GetDefaultFilename implements ConfigAccess -func (rules *ClientConfigLoadingRules) GetDefaultFilename() string { - // Explicit file if we have one. 
-	if rules.IsExplicitFile() {
-		return rules.GetExplicitFile()
-	}
-	// Otherwise, first existing file from precedence.
-	for _, filename := range rules.GetLoadingPrecedence() {
-		if _, err := os.Stat(filename); err == nil {
-			return filename
-		}
-	}
-	// If none exists, use the first from precedence.
-	if len(rules.Precedence) > 0 {
-		return rules.Precedence[0]
-	}
-	return ""
-}
-
-// IsExplicitFile implements ConfigAccess
-func (rules *ClientConfigLoadingRules) IsExplicitFile() bool {
-	return len(rules.ExplicitPath) > 0
-}
-
-// GetExplicitFile implements ConfigAccess
-func (rules *ClientConfigLoadingRules) GetExplicitFile() string {
-	return rules.ExplicitPath
-}
-
-// IsDefaultConfig returns true if the provided configuration matches the default
-func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *rest.Config) bool {
-	if rules.DefaultClientConfig == nil {
-		return false
-	}
-	defaultConfig, err := rules.DefaultClientConfig.ClientConfig()
-	if err != nil {
-		return false
-	}
-	return reflect.DeepEqual(config, defaultConfig)
-}
-
-// LoadFromFile takes a filename and deserializes the contents into a Config object
-func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
-	kubeconfigBytes, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return nil, err
-	}
-	config, err := Load(kubeconfigBytes)
-	if err != nil {
-		return nil, err
-	}
-	glog.V(6).Infoln("Config loaded from file", filename)
-
-	// set LocationOfOrigin on every Cluster, User, and Context
-	for key, obj := range config.AuthInfos {
-		obj.LocationOfOrigin = filename
-		config.AuthInfos[key] = obj
-	}
-	for key, obj := range config.Clusters {
-		obj.LocationOfOrigin = filename
-		config.Clusters[key] = obj
-	}
-	for key, obj := range config.Contexts {
-		obj.LocationOfOrigin = filename
-		config.Contexts[key] = obj
-	}
-
-	if config.AuthInfos == nil {
-		config.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
-	}
-	if config.Clusters == nil {
-		config.Clusters = map[string]*clientcmdapi.Cluster{}
-	}
-	if config.Contexts == nil {
-		config.Contexts = map[string]*clientcmdapi.Context{}
-	}
-
-	return config, nil
-}
-
-// Load takes a byte slice and deserializes the contents into a Config object.
-// Encapsulates deserialization without assuming the source is a file.
-func Load(data []byte) (*clientcmdapi.Config, error) {
-	config := clientcmdapi.NewConfig()
-	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
-	if len(data) == 0 {
-		return config, nil
-	}
-	decoded, _, err := clientcmdlatest.Codec.Decode(data, &unversioned.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config)
-	if err != nil {
-		return nil, err
-	}
-	return decoded.(*clientcmdapi.Config), nil
-}
-
-// WriteToFile serializes the config to yaml and writes it out to a file. If not present, it creates the file with the mode 0600. If it is present,
-// it stomps the contents.
-func WriteToFile(config clientcmdapi.Config, filename string) error {
-	content, err := Write(config)
-	if err != nil {
-		return err
-	}
-	dir := filepath.Dir(filename)
-	if _, err := os.Stat(dir); os.IsNotExist(err) {
-		if err = os.MkdirAll(dir, 0755); err != nil {
-			return err
-		}
-	}
-
-	if err := ioutil.WriteFile(filename, content, 0600); err != nil {
-		return err
-	}
-	return nil
-}
-
-func lockFile(filename string) error {
-	// TODO: find a way to do this with actual file locks. Will
-	// probably need separate solution for windows and linux.
- - // Make sure the dir exists before we try to create a lock file. - dir := filepath.Dir(filename) - if _, err := os.Stat(dir); os.IsNotExist(err) { - if err = os.MkdirAll(dir, 0755); err != nil { - return err - } - } - f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) - if err != nil { - return err - } - f.Close() - return nil -} - -func unlockFile(filename string) error { - return os.Remove(lockName(filename)) -} - -func lockName(filename string) string { - return filename + ".lock" -} - -// Write serializes the config to yaml. -// Encapsulates serialization without assuming the destination is a file. -func Write(config clientcmdapi.Config) ([]byte, error) { - return runtime.Encode(clientcmdlatest.Codec, &config) -} - -func (rules ClientConfigLoadingRules) ResolvePaths() bool { - return !rules.DoNotResolvePaths -} - -// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin -// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without -// modification of its contents. -func ResolveLocalPaths(config *clientcmdapi.Config) error { - for _, cluster := range config.Clusters { - if len(cluster.LocationOfOrigin) == 0 { - continue - } - base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { - return err - } - } - for _, authInfo := range config.AuthInfos { - if len(authInfo.LocationOfOrigin) == 0 { - continue - } - base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { - return err - } - } - - return nil -} - -// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already -// absolute, but any existing path will be resolved relative to LocationOfOrigin -func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error { - if len(cluster.LocationOfOrigin) == 0 { - return fmt.Errorf("no location of origin for %s", cluster.Server) - } - base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { - return err - } - if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil { - return err - } - - return nil -} - -// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. 
This assumes that any NEW path is already -// absolute, but any existing path will be resolved relative to LocationOfOrigin -func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error { - if len(authInfo.LocationOfOrigin) == 0 { - return fmt.Errorf("no location of origin for %v", authInfo) - } - base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { - return err - } - if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil { - return err - } - - return nil -} - -func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error { - return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base) -} - -func ResolveConfigPaths(config *clientcmdapi.Config, base string) error { - return ResolvePaths(GetConfigFileReferences(config), base) -} - -func GetConfigFileReferences(config *clientcmdapi.Config) []*string { - refs := []*string{} - - for _, cluster := range config.Clusters { - refs = append(refs, GetClusterFileReferences(cluster)...) - } - for _, authInfo := range config.AuthInfos { - refs = append(refs, GetAuthInfoFileReferences(authInfo)...) - } - - return refs -} - -func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string { - return []*string{&cluster.CertificateAuthority} -} - -func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string { - return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile} -} - -// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory -func ResolvePaths(refs []*string, base string) error { - for _, ref := range refs { - // Don't resolve empty paths - if len(*ref) > 0 { - // Don't resolve absolute paths - if !filepath.IsAbs(*ref) { - *ref = filepath.Join(base, *ref) - } - } - } - return nil -} - -// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. -// Any path requiring a backstep is left as-is as long it is absolute. Any non-absolute path that can't be relativized produces an error -func RelativizePathWithNoBacksteps(refs []*string, base string) error { - for _, ref := range refs { - // Don't relativize empty paths - if len(*ref) > 0 { - rel, err := MakeRelative(*ref, base) - if err != nil { - return err - } - - // if we have a backstep, don't mess with the path - if strings.HasPrefix(rel, "../") { - if filepath.IsAbs(*ref) { - continue - } - - return fmt.Errorf("%v requires backsteps and is not absolute", *ref) - } - - *ref = rel - } - } - return nil -} - -func MakeRelative(path, base string) (string, error) { - if len(path) > 0 { - rel, err := filepath.Rel(base, path) - if err != nil { - return path, err - } - return rel, nil - } - return path, nil -} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go deleted file mode 100644 index 5343509c0..000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
deleted file mode 100644
index 5343509c0..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clientcmd
-
-import (
-	"io"
-	"sync"
-
-	"github.com/golang/glog"
-
-	"k8s.io/client-go/rest"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader.
-// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
-// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
-// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid
-// passing extraneous information down a call stack
-type DeferredLoadingClientConfig struct {
-	loader         ClientConfigLoader
-	overrides      *ConfigOverrides
-	fallbackReader io.Reader
-
-	clientConfig ClientConfig
-	loadingLock  sync.Mutex
-
-	// provided for testing
-	icc InClusterConfig
-}
-
-// InClusterConfig abstracts details of whether the client is running in a cluster for testing.
-type InClusterConfig interface {
-	ClientConfig
-	Possible() bool
-}
-
-// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name
-func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig {
-	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: inClusterClientConfig{}}
-}
-
-// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader
-func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
-	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: inClusterClientConfig{}, fallbackReader: fallbackReader}
-}
-
-func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
-	if config.clientConfig == nil {
-		config.loadingLock.Lock()
-		defer config.loadingLock.Unlock()
-
-		if config.clientConfig == nil {
-			mergedConfig, err := config.loader.Load()
-			if err != nil {
-				return nil, err
-			}
-
-			var mergedClientConfig ClientConfig
-			if config.fallbackReader != nil {
-				mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader)
-			} else {
-				mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader)
-			}
-
-			config.clientConfig = mergedClientConfig
-		}
-	}
-
-	return config.clientConfig, nil
-}
-
-func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) {
-	mergedConfig, err := config.createClientConfig()
-	if err != nil {
-		return clientcmdapi.Config{}, err
-	}
-
-	return mergedConfig.RawConfig()
-}
-
-// ClientConfig implements ClientConfig
-func (config *DeferredLoadingClientConfig) ClientConfig() (*rest.Config, error) {
-	mergedClientConfig, err := config.createClientConfig()
-	if err != nil {
-		return nil, err
-	}
-
-	// load the configuration and return on non-empty errors and if the
-	// content differs from the default config
-	mergedConfig, err := mergedClientConfig.ClientConfig()
-	switch {
-	case err != nil:
-		if !IsEmptyConfig(err) {
-			// return on any error except empty config
-			return nil, err
-		}
-	case mergedConfig != nil:
-		// the configuration is valid, but if this is equal to the defaults we should try
-		// in-cluster configuration
-		if !config.loader.IsDefaultConfig(mergedConfig) {
-			return mergedConfig, nil
-		}
-	}
-
-	// check for in-cluster configuration and use it
-	if config.icc.Possible() {
-		glog.V(4).Infof("Using in-cluster configuration")
-		return config.icc.ClientConfig()
-	}
-
-	// return the result of the merged client config
-	return mergedConfig, err
-}
-
-// Namespace implements KubeConfig
-func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
-	mergedKubeConfig, err := config.createClientConfig()
-	if err != nil {
-		return "", false, err
-	}
-
-	ns, ok, err := mergedKubeConfig.Namespace()
-	// if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or
-	// if in-cluster config is not possible, return immediately
-	if (err != nil && !IsEmptyConfig(err)) || ok || !config.icc.Possible() {
-		// return on any error except empty config
-		return ns, ok, err
-	}
-
-	glog.V(4).Infof("Using in-cluster namespace")
-
-	// allow the namespace from the service account token directory to be used.
-	return config.icc.Namespace()
-}
-
-// ConfigAccess implements ClientConfig
-func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess {
-	return config.loader
-}
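For context: `DeferredLoadingClientConfig` defers reading the kubeconfig until `ClientConfig()` is called, and falls back to in-cluster configuration when the merged result equals the defaults. A minimal usage sketch against the `clientcmd` API as vendored at this revision (illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Loading rules follow the usual precedence: $KUBECONFIG,
	// then the recommended home file (~/.kube/config).
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{}

	// The deferred config re-reads the rules at call time and tries
	// in-cluster configuration when the merged config is the default.
	cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)

	restCfg, err := cc.ClientConfig()
	if err != nil {
		fmt.Println("no usable configuration:", err)
		return
	}
	fmt.Println("API server:", restCfg.Host)
}
```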
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
deleted file mode 100644
index 965eca644..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clientcmd
-
-import (
-	"strconv"
-
-	"github.com/spf13/pflag"
-
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't
-// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one"
-type ConfigOverrides struct {
-	AuthInfo clientcmdapi.AuthInfo
-	// ClusterDefaults are applied before the configured cluster info is loaded.
-	ClusterDefaults clientcmdapi.Cluster
-	ClusterInfo     clientcmdapi.Cluster
-	Context         clientcmdapi.Context
-	CurrentContext  string
-	Timeout         string
-}
-
-// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly
-// corresponds to ConfigOverrides
-type ConfigOverrideFlags struct {
-	AuthOverrideFlags    AuthOverrideFlags
-	ClusterOverrideFlags ClusterOverrideFlags
-	ContextOverrideFlags ContextOverrideFlags
-	CurrentContext       FlagInfo
-	Timeout              FlagInfo
-}
-
-// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects
-type AuthOverrideFlags struct {
-	ClientCertificate FlagInfo
-	ClientKey         FlagInfo
-	Token             FlagInfo
-	Impersonate       FlagInfo
-	Username          FlagInfo
-	Password          FlagInfo
-}
-
-// ContextOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects
-type ContextOverrideFlags struct {
-	ClusterName  FlagInfo
-	AuthInfoName FlagInfo
-	Namespace    FlagInfo
-}
-
-// ClusterOverride holds the flag names to be used for binding command line flags for Cluster objects
-type ClusterOverrideFlags struct {
-	APIServer             FlagInfo
-	APIVersion            FlagInfo
-	CertificateAuthority  FlagInfo
-	InsecureSkipTLSVerify FlagInfo
-}
-
-// FlagInfo contains information about how to register a flag. This struct is useful if you want to provide a way for an extender to
-// get back a set of recommended flag names, descriptions, and defaults, but allow for customization by an extender. This makes for
-// coherent extension, without full prescription
-type FlagInfo struct {
-	// LongName is the long string for a flag. If this is empty, then the flag will not be bound
-	LongName string
-	// ShortName is the single character for a flag. If this is empty, then there will be no short flag
-	ShortName string
-	// Default is the default value for the flag
-	Default string
-	// Description is the description for the flag
-	Description string
-}
-
-// BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered
-func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) {
-	// you can't register a flag without a long name
-	if len(f.LongName) > 0 {
-		flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description)
-	}
-}
-
-// BindBoolFlag binds the flag based on the provided info. If LongName == "", nothing is registered
-func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) {
-	// you can't register a flag without a long name
-	if len(f.LongName) > 0 {
-		// try to parse Default as a bool. If it fails, assume false
-		boolVal, err := strconv.ParseBool(f.Default)
-		if err != nil {
-			boolVal = false
-		}
-
-		flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description)
-	}
-}
-
-const (
-	FlagClusterName  = "cluster"
-	FlagAuthInfoName = "user"
-	FlagContext      = "context"
-	FlagNamespace    = "namespace"
-	FlagAPIServer    = "server"
-	FlagAPIVersion   = "api-version"
-	FlagInsecure     = "insecure-skip-tls-verify"
-	FlagCertFile     = "client-certificate"
-	FlagKeyFile      = "client-key"
-	FlagCAFile       = "certificate-authority"
-	FlagEmbedCerts   = "embed-certs"
-	FlagBearerToken  = "token"
-	FlagImpersonate  = "as"
-	FlagUsername     = "username"
-	FlagPassword     = "password"
-	FlagTimeout      = "request-timeout"
-)
-
-// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
-func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags {
-	return AuthOverrideFlags{
-		ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"},
-		ClientKey:         FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"},
-		Token:             FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"},
-		Impersonate:       FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"},
-		Username:          FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"},
-		Password:          FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"},
-	}
-}
-
-// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
-func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
-	return ClusterOverrideFlags{
-		APIServer:             FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"},
-		APIVersion:            FlagInfo{prefix + FlagAPIVersion, "", "", "DEPRECATED: The API version to use when talking to the server"},
-		CertificateAuthority:  FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert. file for the certificate authority"},
-		InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
-	}
-}
-
-// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
-func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags {
-	return ConfigOverrideFlags{
-		AuthOverrideFlags:    RecommendedAuthOverrideFlags(prefix),
-		ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix),
-		ContextOverrideFlags: RecommendedContextOverrideFlags(prefix),
-
-		CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"},
-		Timeout:        FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."},
-	}
-}
-
-// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
-func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags {
-	return ContextOverrideFlags{
-		ClusterName:  FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"},
-		AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"},
-		Namespace:    FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"},
-	}
-}
-
-// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables
-func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) {
-	flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate)
-	flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey)
-	flagNames.Token.BindStringFlag(flags, &authInfo.Token)
-	flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate)
-	flagNames.Username.BindStringFlag(flags, &authInfo.Username)
-	flagNames.Password.BindStringFlag(flags, &authInfo.Password)
-}
-
-// BindClusterFlags is a convenience method to bind the specified flags to their associated variables
-func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
-	flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server)
-	// TODO: remove --api-version flag in 1.3.
-	flagNames.APIVersion.BindStringFlag(flags, &clusterInfo.APIVersion)
-	flags.MarkDeprecated(FlagAPIVersion, "flag is no longer respected and will be deleted in the next release")
-	flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
-	flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
-}
-
-// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables
-func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) {
-	BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags)
-	BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags)
-	BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags)
-	flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext)
-	flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout)
-}
-
-// BindFlags is a convenience method to bind the specified flags to their associated variables
-func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
-	flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster)
-	flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo)
-	flagNames.Namespace.BindStringFlag(flags, &contextInfo.Namespace)
-}
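The override types above exist so callers can bind kubectl-style flags onto a `ConfigOverrides` struct before the kubeconfig is ever read. An illustrative sketch (not part of the diff; the flag-set name "example" is arbitrary) of wiring them to a pflag set:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	overrides := &clientcmd.ConfigOverrides{}

	// An empty prefix yields the familiar kubectl-style flag names
	// (--server, --context, --namespace, ...).
	clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))

	if err := flags.Parse(os.Args[1:]); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("context override:", overrides.CurrentContext)
}
```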
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go
deleted file mode 100644
index 284245e96..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clientcmd
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"reflect"
-	"strings"
-
-	utilerrors "k8s.io/client-go/pkg/util/errors"
-	"k8s.io/client-go/pkg/util/validation"
-	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-var (
-	ErrNoContext   = errors.New("no context chosen")
-	ErrEmptyConfig = errors.New("no configuration has been provided")
-	// message is for consistency with old behavior
-	ErrEmptyCluster = errors.New("cluster has no server defined")
-)
-
-type errContextNotFound struct {
-	ContextName string
-}
-
-func (e *errContextNotFound) Error() string {
-	return fmt.Sprintf("context was not found for specified context: %v", e.ContextName)
-}
-
-// IsContextNotFound returns a boolean indicating whether the error is known to
-// report that a context was not found
-func IsContextNotFound(err error) bool {
-	if err == nil {
-		return false
-	}
-	if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext {
-		return true
-	}
-	return strings.Contains(err.Error(), "context was not found for specified context")
-}
-
-// IsEmptyConfig returns true if the provided error indicates the provided configuration
-// is empty.
-func IsEmptyConfig(err error) bool {
-	switch t := err.(type) {
-	case errConfigurationInvalid:
-		return len(t) == 1 && t[0] == ErrEmptyConfig
-	}
-	return err == ErrEmptyConfig
-}
-
-// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
-type errConfigurationInvalid []error
-
-// errConfigurationInvalid implements error and Aggregate
-var _ error = errConfigurationInvalid{}
-var _ utilerrors.Aggregate = errConfigurationInvalid{}
-
-func newErrConfigurationInvalid(errs []error) error {
-	switch len(errs) {
-	case 0:
-		return nil
-	default:
-		return errConfigurationInvalid(errs)
-	}
-}
-
-// Error implements the error interface
-func (e errConfigurationInvalid) Error() string {
-	return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
-}
-
-// Errors implements the AggregateError interface
-func (e errConfigurationInvalid) Errors() []error {
-	return e
-}
-
-// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid.
-func IsConfigurationInvalid(err error) bool {
-	switch err.(type) {
-	case *errContextNotFound, errConfigurationInvalid:
-		return true
-	}
-	return IsContextNotFound(err)
-}
-
-// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible.
-func Validate(config clientcmdapi.Config) error {
-	validationErrors := make([]error, 0)
-
-	if clientcmdapi.IsConfigEmpty(&config) {
-		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
-	}
-
-	if len(config.CurrentContext) != 0 {
-		if _, exists := config.Contexts[config.CurrentContext]; !exists {
-			validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext})
-		}
-	}
-
-	for contextName, context := range config.Contexts {
-		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
-	}
-
-	for authInfoName, authInfo := range config.AuthInfos {
-		validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...)
-	}
-
-	for clusterName, clusterInfo := range config.Clusters {
-		validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...)
-	}
-
-	return newErrConfigurationInvalid(validationErrors)
-}
-
-// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config,
-// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
-func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {
-	validationErrors := make([]error, 0)
-
-	if clientcmdapi.IsConfigEmpty(&config) {
-		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
-	}
-
-	var contextName string
-	if len(passedContextName) != 0 {
-		contextName = passedContextName
-	} else {
-		contextName = config.CurrentContext
-	}
-
-	if len(contextName) == 0 {
-		return ErrNoContext
-	}
-
-	context, exists := config.Contexts[contextName]
-	if !exists {
-		validationErrors = append(validationErrors, &errContextNotFound{contextName})
-	}
-
-	if exists {
-		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
-		validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...)
-		validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...)
-	}
-
-	return newErrConfigurationInvalid(validationErrors)
-}
-
-// validateClusterInfo looks for conflicts and errors in the cluster info
-func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error {
-	validationErrors := make([]error, 0)
-
-	if reflect.DeepEqual(clientcmdapi.Cluster{}, clusterInfo) {
-		return []error{ErrEmptyCluster}
-	}
-
-	if len(clusterInfo.Server) == 0 {
-		if len(clusterName) == 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined"))
-		} else {
-			validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
-		}
-	}
-	// Make sure CA data and CA file aren't both specified
-	if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
-		validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName))
-	}
-	if len(clusterInfo.CertificateAuthority) != 0 {
-		clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
-		defer clientCertCA.Close()
-		if err != nil {
-			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
-		}
-	}
-
-	return validationErrors
-}
-
-// validateAuthInfo looks for conflicts and errors in the auth info
-func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error {
-	validationErrors := make([]error, 0)
-
-	usingAuthPath := false
-	methods := make([]string, 0, 3)
-	if len(authInfo.Token) != 0 {
-		methods = append(methods, "token")
-	}
-	if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
-		methods = append(methods, "basicAuth")
-	}
-
-	if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
-		// Make sure cert data and file aren't both specified
-		if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName))
-		}
-		// Make sure key data and file aren't both specified
-		if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
-		}
-		// Make sure a key is specified
-		if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName))
-		}
-
-		if len(authInfo.ClientCertificate) != 0 {
-			clientCertFile, err := os.Open(authInfo.ClientCertificate)
-			defer clientCertFile.Close()
-			if err != nil {
-				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
-			}
-		}
-		if len(authInfo.ClientKey) != 0 {
-			clientKeyFile, err := os.Open(authInfo.ClientKey)
-			defer clientKeyFile.Close()
-			if err != nil {
-				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
-			}
-		}
-	}
-
-	// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
-	if (len(methods) > 1) && (!usingAuthPath) {
-		validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
-	}
-
-	return validationErrors
-}
-
-// validateContext looks for errors in the context. It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return
-func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error {
-	validationErrors := make([]error, 0)
-
-	if len(context.AuthInfo) == 0 {
-		validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName))
-	} else if _, exists := config.AuthInfos[context.AuthInfo]; !exists {
-		validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName))
-	}
-
-	if len(context.Cluster) == 0 {
-		validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName))
-	} else if _, exists := config.Clusters[context.Cluster]; !exists {
-		validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName))
-	}
-
-	if len(context.Namespace) != 0 {
-		if len(validation.IsDNS1123Label(context.Namespace)) != 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName))
-		}
-	}
-
-	return validationErrors
-}
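As the deleted `Validate` above notes, validation aggregates every problem it finds rather than stopping at the first one. A small sketch (illustrative only; the context name "missing" is hypothetical) showing the aggregated error for a config whose current context does not exist:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	config := clientcmdapi.NewConfig()
	config.CurrentContext = "missing" // refers to a context that was never defined

	// Validate collects all problems into one errConfigurationInvalid
	// aggregate instead of returning on the first failure.
	if err := clientcmd.Validate(*config); err != nil {
		fmt.Println(err)
		// invalid configuration: context was not found for specified context: missing
	}
}
```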