diff --git a/README.md b/README.md
index 7164eaeb3..fbe3d3b5e 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,6 @@ You can access to a simple HTML frontend of Træfik.
 - [Oxy](https://github.com/vulcand/oxy): an awesome proxy library made by Mailgun guys
 - [Gorilla mux](https://github.com/gorilla/mux): famous request router
 - [Negroni](https://github.com/codegangsta/negroni): web middlewares made simple
-- [Manners](https://github.com/mailgun/manners): graceful shutdown of http.Handler servers
 - [Lego](https://github.com/xenolf/lego): the best [Let's Encrypt](https://letsencrypt.org) library in go

 ## Test it
diff --git a/build.Dockerfile b/build.Dockerfile
index 02440b249..28187b90a 100644
--- a/build.Dockerfile
+++ b/build.Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.7
+FROM golang:1.8

 # Install a more recent version of mercurial to avoid mismatching results
 # between glide run on a decently updated host system and the build container.
diff --git a/glide.lock b/glide.lock
index d5e54a57b..118013102 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,5 +1,5 @@
-hash: 9d7c36c335fe9106ec79cb86a3b3824c23b63d3fb3a3fb4c75bd6915f3afcdd4
-updated: 2017-03-08T18:53:12.139107148-07:00
+hash: 56175c5c588abf1169ba2425ac6dd7e3e5a8cf8ab6ad3f75cad51e45f5e94e3a
+updated: 2017-03-23T22:43:06.217624505+01:00
 imports:
 - name: bitbucket.org/ww/goautoneg
   version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675
@@ -132,78 +132,12 @@ imports:
 - name: github.com/docker/distribution
   version: 325b0804fef3a66309d962357aac3c2ce3f4d329
   subpackages:
-  - context
   - digest
   - reference
-  - registry/api/errcode
-  - registry/api/v2
-  - registry/client
-  - registry/client/auth
-  - registry/client/auth/challenge
-  - registry/client/transport
-  - registry/storage/cache
-  - registry/storage/cache/memory
-  - uuid
 - name: github.com/docker/docker
   version: 49bf474f9ed7ce7143a59d1964ff7b7fd9b52178
   subpackages:
-  - api/types
-  - api/types/backend
-  - api/types/blkiodev
-  - api/types/container
-  - api/types/filters
-  - api/types/mount
-  - api/types/network
-  - api/types/registry
-  - api/types/strslice
-  - api/types/swarm
-  - api/types/versions
-  - builder
-  - builder/dockerignore
-  - cliconfig
-  - cliconfig/configfile
-  - daemon/graphdriver
-  - image
-  - image/v1
-  - layer
   - namesgenerator
-  - oci
-  - opts
-  - pkg/archive
-  - pkg/chrootarchive
-  - pkg/fileutils
-  - pkg/gitutils
-  - pkg/homedir
-  - pkg/httputils
-  - pkg/idtools
-  - pkg/ioutils
-  - pkg/jsonlog
-  - pkg/jsonmessage
-  - pkg/longpath
-  - pkg/mount
-  - pkg/namesgenerator
-  - pkg/plugingetter
-  - pkg/plugins
-  - pkg/plugins/transport
-  - pkg/pools
-  - pkg/progress
-  - pkg/promise
-  - pkg/random
-  - pkg/reexec
-  - pkg/signal
-  - pkg/stdcopy
-  - pkg/streamformatter
-  - pkg/stringid
-  - pkg/symlink
-  - pkg/system
-  - pkg/tarsum
-  - pkg/term
-  - pkg/term/windows
-  - pkg/urlutil
-  - plugin/v2
-  - reference
-  - registry
-  - runconfig/opts
 - name: github.com/docker/engine-api
   version: 3d1601b9d2436a70b0dfc045a23f6503d19195df
   subpackages:
@@ -329,8 +263,6 @@ imports:
   version: 72f9bd7c4e0c2a40055ab3d0f09654f730cce982
 - name: github.com/juju/ratelimit
   version: 77ed1c8a01217656d2080ad51981f6e99adaa177
-- name: github.com/mailgun/manners
-  version: a585afd9d65c0e05f6c003f921e71ebc05074f4f
 - name: github.com/mailgun/timetools
   version: fd192d755b00c968d312d23f521eb0cdc6f66bd0
 - name: github.com/mailru/easyjson
@@ -382,9 +314,6 @@ imports:
 - name: github.com/opencontainers/runc
   version: 1a81e9ab1f138c091fe5c86d0883f87716088527
   subpackages:
-  - libcontainer/configs
-  - libcontainer/devices
-  - libcontainer/system
   - libcontainer/user
 - name: github.com/ovh/go-ovh
   version: a8a4c0bc40e56322142649bda7b2b4bb15145b6e
@@ -709,63 +638,4 @@ imports:
   - 1.5/tools/clientcmd/api
   - 1.5/tools/metrics
   - 1.5/transport
-testImports:
-- name: github.com/Azure/go-ansiterm
-  version: fa152c58bc15761d0200cb75fe958b89a9d4888e
-  subpackages:
-  - winterm
-- name: github.com/cloudfoundry-incubator/candiedyaml
-  version: 99c3df83b51532e3615f851d8c2dbb638f5313bf
-- name: github.com/docker/libcompose
-  version: d1876c1d68527a49c0aac22a0b161acc7296b740
-  subpackages:
-  - config
-  - docker
-  - docker/builder
-  - docker/client
-  - docker/network
-  - labels
-  - logger
-  - lookup
-  - project
-  - project/events
-  - project/options
-  - utils
-  - version
-  - yaml
-- name: github.com/flynn/go-shlex
-  version: 3f9db97f856818214da2e1057f8ad84803971cff
-- name: github.com/go-check/check
-  version: 11d3bc7aa68e238947792f30573146a3231fc0f1
-- name: github.com/gorilla/mux
-  version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf
-- name: github.com/libkermit/compose
-  version: cadc5a3b83a15790174bd7fbc75ea2529785e772
-  subpackages:
-  - check
-- name: github.com/libkermit/docker
-  version: 55e3595409924fcfbb850811e5a7cdbe8960a0b7
-- name: github.com/libkermit/docker-check
-  version: cbe0ef03b3d23070eac4d00ba8828f2cc7f7e5a3
-- name: github.com/opencontainers/runtime-spec
-  version: 06479209bdc0d4135911688c18157bd39bd99c22
-  subpackages:
-  - specs-go
-- name: github.com/vbatts/tar-split
-  version: 6810cedb21b2c3d0b9bb8f9af12ff2dc7a2f14df
-  subpackages:
-  - archive/tar
-  - tar/asm
-  - tar/storage
-- name: github.com/vdemeester/shakers
-  version: 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
-- name: github.com/xeipuuv/gojsonpointer
-  version: e0fe6f68307607d540ed8eac07a342c33fa1b54a
-- name: github.com/xeipuuv/gojsonreference
-  version: e02fc20de94c78484cd5ffb007f8af96be030a45
-- name: github.com/xeipuuv/gojsonschema
-  version: 00f9fafb54d2244d291b86ab63d12c38bd5c3886
-- name: golang.org/x/time
-  version: a4bde12657593d5e90d0533a3e4fd95e635124cb
-  subpackages:
-  - rate
+testImports: []
diff --git a/glide.yaml b/glide.yaml
index abefcda58..80c57a6f4 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -47,7 +47,6 @@ import:
 - package: github.com/hashicorp/consul
   subpackages:
   - api
-- package: github.com/mailgun/manners
 - package: github.com/streamrail/concurrent-map
 - package: github.com/stretchr/testify
   subpackages:
diff --git a/server.go b/server.go
index 7e16b2b5e..86adb9c86 100644
--- a/server.go
+++ b/server.go
@@ -29,13 +29,13 @@ import (
 	"github.com/containous/traefik/provider"
 	"github.com/containous/traefik/safe"
 	"github.com/containous/traefik/types"
-	"github.com/mailgun/manners"
 	"github.com/streamrail/concurrent-map"
 	"github.com/vulcand/oxy/cbreaker"
 	"github.com/vulcand/oxy/connlimit"
 	"github.com/vulcand/oxy/forward"
 	"github.com/vulcand/oxy/roundrobin"
 	"github.com/vulcand/oxy/utils"
+	"sync"
 )

 var oxyLogger = &OxyLogger{}
@@ -58,7 +58,7 @@ type Server struct {
 type serverEntryPoints map[string]*serverEntryPoint

 type serverEntryPoint struct {
-	httpServer *manners.GracefulServer
+	httpServer *http.Server
 	httpRouter *middlewares.HandlerSwitcher
 }
@@ -114,15 +114,23 @@ func (server *Server) Wait() {

 // Stop stops the server
 func (server *Server) Stop() {
-	for serverEntryPointName, serverEntryPoint := range server.serverEntryPoints {
-		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(server.globalConfiguration.GraceTimeOut)*time.Second)
-		go func() {
-			log.Debugf("Waiting %d seconds before killing connections on entrypoint %s...", 30, serverEntryPointName)
-			serverEntryPoint.httpServer.BlockingClose()
+	defer log.Info("Server stopped")
+	var wg sync.WaitGroup
+	for sepn, sep := range server.serverEntryPoints {
+		wg.Add(1)
+		go func(serverEntryPointName string, serverEntryPoint *serverEntryPoint) {
+			defer wg.Done()
+			ctx, cancel := context.WithTimeout(context.Background(), time.Duration(server.globalConfiguration.GraceTimeOut)*time.Second)
+			log.Debugf("Waiting %d seconds before killing connections on entrypoint %s...", server.globalConfiguration.GraceTimeOut, serverEntryPointName)
+			if err := serverEntryPoint.httpServer.Shutdown(ctx); err != nil {
+				log.Debugf("Wait is over due to: %s", err)
+				serverEntryPoint.httpServer.Close()
+			}
 			cancel()
-		}()
-		<-ctx.Done()
+			log.Debugf("Entrypoint %s closed", serverEntryPointName)
+		}(sepn, sep)
 	}
+	wg.Wait()
 	server.stopChan <- true
 }
@@ -191,7 +199,7 @@ func (server *Server) startHTTPServers() {
 		if server.globalConfiguration.EntryPoints[newServerEntryPointName].Compress {
 			serverMiddlewares = append(serverMiddlewares, &middlewares.Compress{})
 		}
-		newsrv, err := server.prepareServer(newServerEntryPointName, newServerEntryPoint.httpRouter, server.globalConfiguration.EntryPoints[newServerEntryPointName], nil, serverMiddlewares...)
+		newsrv, err := server.prepareServer(newServerEntryPointName, newServerEntryPoint.httpRouter, server.globalConfiguration.EntryPoints[newServerEntryPointName], serverMiddlewares...)
 		if err != nil {
 			log.Fatal("Error preparing server: ", err)
 		}
@@ -493,21 +501,20 @@ func (server *Server) createTLSConfig(entryPointName string, tlsOption *TLS, rou
 	return config, nil
 }

-func (server *Server) startServer(srv *manners.GracefulServer, globalConfiguration GlobalConfiguration) {
+func (server *Server) startServer(srv *http.Server, globalConfiguration GlobalConfiguration) {
 	log.Infof("Starting server on %s", srv.Addr)
+	var err error
 	if srv.TLSConfig != nil {
-		if err := srv.ListenAndServeTLSWithConfig(srv.TLSConfig); err != nil {
-			log.Fatal("Error creating server: ", err)
-		}
+		err = srv.ListenAndServeTLS("", "")
 	} else {
-		if err := srv.ListenAndServe(); err != nil {
-			log.Fatal("Error creating server: ", err)
-		}
+		err = srv.ListenAndServe()
+	}
+	if err != nil {
+		log.Error("Error creating server: ", err)
 	}
-	log.Info("Server stopped")
 }

-func (server *Server) prepareServer(entryPointName string, router *middlewares.HandlerSwitcher, entryPoint *EntryPoint, oldServer *manners.GracefulServer, middlewares ...negroni.Handler) (*manners.GracefulServer, error) {
+func (server *Server) prepareServer(entryPointName string, router *middlewares.HandlerSwitcher, entryPoint *EntryPoint, middlewares ...negroni.Handler) (*http.Server, error) {
 	log.Infof("Preparing server %s %+v", entryPointName, entryPoint)
 	// middlewares
 	var negroni = negroni.New()
@@ -521,24 +528,11 @@ func (server *Server) prepareServer(entryPointName string, router *middlewares.H
 		return nil, err
 	}

-	if oldServer == nil {
-		return manners.NewWithServer(
-			&http.Server{
-				Addr:      entryPoint.Address,
-				Handler:   negroni,
-				TLSConfig: tlsConfig,
-			}), nil
-	}
-	gracefulServer, err := oldServer.HijackListener(&http.Server{
+	return &http.Server{
 		Addr:      entryPoint.Address,
 		Handler:   negroni,
 		TLSConfig: tlsConfig,
-	}, tlsConfig)
-	if err != nil {
-		log.Errorf("Error hijacking server: %s", err)
-		return nil, err
-	}
-	return gracefulServer, nil
+	}, nil
 }

 func (server *Server) buildEntryPoints(globalConfiguration GlobalConfiguration) map[string]*serverEntryPoint {
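[Editor's note] The server.go hunk above is the heart of this change: mailgun/manners is dropped in favor of http.Server.Shutdown and http.Server.Close, both added to the standard library in Go 1.8 (hence the golang:1.7 to golang:1.8 bump in build.Dockerfile). The rewrite also fixes a small bug in the old Stop: the Debugf logged a hardcoded 30 instead of the configured GraceTimeOut. The sketch below is not Traefik code; it is a minimal, self-contained illustration of the same pattern, where the addresses, handler, signal wait, and graceTimeOut value are all stand-ins.

package main

import (
	"context"
	"log"
	"net/http"
	"sync"
	"time"
)

func main() {
	// One handler shared by all entry points; a stand-in for Traefik's routers.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	servers := []*http.Server{
		{Addr: ":8080", Handler: handler},
		{Addr: ":8443", Handler: handler},
	}

	for _, srv := range servers {
		go func(srv *http.Server) {
			// After Shutdown, ListenAndServe returns http.ErrServerClosed,
			// so that value is not a real failure.
			if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
				log.Printf("server error: %v", err)
			}
		}(srv)
	}

	time.Sleep(2 * time.Second) // stand-in for blocking on a stop signal

	graceTimeOut := 10 * time.Second // plays the role of GraceTimeOut
	var wg sync.WaitGroup
	for _, srv := range servers {
		wg.Add(1)
		go func(srv *http.Server) {
			defer wg.Done()
			ctx, cancel := context.WithTimeout(context.Background(), graceTimeOut)
			defer cancel()
			// Shutdown stops accepting new connections and waits for
			// in-flight requests; if the grace period expires first,
			// Close force-closes whatever is left.
			if err := srv.Shutdown(ctx); err != nil {
				log.Printf("grace period exceeded: %v", err)
				srv.Close()
			}
		}(srv)
	}
	wg.Wait()
}

Two consequences worth noting. First, the new Stop shuts entry points down in parallel (one goroutine per entry point, joined by a WaitGroup), where the old code waited on each entry point sequentially via <-ctx.Done(). Second, because ListenAndServe now returns http.ErrServerClosed on every graceful shutdown, the new startServer can no longer treat a returned error as fatal, which is why log.Fatal became log.Error. The remainder of the diff deletes vendored packages that become unused once the dependency tree is pruned:

diff --git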
a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go deleted file mode 100644 index 23cbf5b54..000000000 --- a/vendor/github.com/docker/distribution/context/context.go +++ /dev/null @@ -1,85 +0,0 @@ -package context - -import ( - "sync" - - "github.com/docker/distribution/uuid" - "golang.org/x/net/context" -) - -// Context is a copy of Context from the golang.org/x/net/context package. -type Context interface { - context.Context -} - -// instanceContext is a context that provides only an instance id. It is -// provided as the main background context. -type instanceContext struct { - Context - id string // id of context, logged as "instance.id" - once sync.Once // once protect generation of the id -} - -func (ic *instanceContext) Value(key interface{}) interface{} { - if key == "instance.id" { - ic.once.Do(func() { - // We want to lazy initialize the UUID such that we don't - // call a random generator from the package initialization - // code. For various reasons random could not be available - // https://github.com/docker/distribution/issues/782 - ic.id = uuid.Generate().String() - }) - return ic.id - } - - return ic.Context.Value(key) -} - -var background = &instanceContext{ - Context: context.Background(), -} - -// Background returns a non-nil, empty Context. The background context -// provides a single key, "instance.id" that is globally unique to the -// process. -func Background() Context { - return background -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. Use context Values only for request-scoped data that transits processes -// and APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} - -// stringMapContext is a simple context implementation that checks a map for a -// key, falling back to a parent if not present. -type stringMapContext struct { - context.Context - m map[string]interface{} -} - -// WithValues returns a context that proxies lookups through a map. Only -// supports string keys. -func WithValues(ctx context.Context, m map[string]interface{}) context.Context { - mo := make(map[string]interface{}, len(m)) // make our own copy. - for k, v := range m { - mo[k] = v - } - - return stringMapContext{ - Context: ctx, - m: mo, - } -} - -func (smc stringMapContext) Value(key interface{}) interface{} { - if ks, ok := key.(string); ok { - if v, ok := smc.m[ks]; ok { - return v - } - } - - return smc.Context.Value(key) -} diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go deleted file mode 100644 index 3b4ab8882..000000000 --- a/vendor/github.com/docker/distribution/context/doc.go +++ /dev/null @@ -1,89 +0,0 @@ -// Package context provides several utilities for working with -// golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevant request information but this package is not limited to -// that purpose. -// -// The easiest way to get started is to get the background context: -// -// ctx := context.Background() -// -// The returned context should be passed around your application and be the -// root of all other context instances. 
If the application has a version, this -// line should be called before anything else: -// -// ctx := context.WithVersion(context.Background(), version) -// -// The above will store the version in the context and will be available to -// the logger. -// -// Logging -// -// The most useful aspect of this package is GetLogger. This function takes -// any context.Context interface and returns the current logger from the -// context. Canonical usage looks like this: -// -// GetLogger(ctx).Infof("something interesting happened") -// -// GetLogger also takes optional key arguments. The keys will be looked up in -// the context and reported with the logger. The following example would -// return a logger that prints the version with each log message: -// -// ctx := context.Context(context.Background(), "version", version) -// GetLogger(ctx, "version").Infof("this log message has a version field") -// -// The above would print out a log message like this: -// -// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m -// -// When used with WithLogger, we gain the ability to decorate the context with -// loggers that have information from disparate parts of the call stack. -// Following from the version example, we can build a new context with the -// configured logger such that we always print the version field: -// -// ctx = WithLogger(ctx, GetLogger(ctx, "version")) -// -// Since the logger has been pushed to the context, we can now get the version -// field for free with our log messages. Future calls to GetLogger on the new -// context will have the version field: -// -// GetLogger(ctx).Infof("this log message has a version field") -// -// This becomes more powerful when we start stacking loggers. Let's say we -// have the version logger from above but also want a request id. Using the -// context above, in our request scoped function, we place another logger in -// the context: -// -// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context -// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) -// -// When GetLogger is called on the new context, "http.request.id" will be -// included as a logger field, along with the original "version" field: -// -// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m -// -// Note that this only affects the new context, the previous context, with the -// version field, can be used independently. Put another way, the new logger, -// added to the request context, is unique to that context and can have -// request scoped varaibles. -// -// HTTP Requests -// -// This package also contains several methods for working with http requests. -// The concepts are very similar to those described above. We simply place the -// request in the context using WithRequest. This makes the request variables -// available. GetRequestLogger can then be called to get request specific -// variables in a log line: -// -// ctx = WithRequest(ctx, req) -// GetRequestLogger(ctx).Infof("request variables") -// -// Like above, if we want to include the request data in all log messages in -// the context, we push the logger to a new context and use that one: -// -// ctx = WithLogger(ctx, GetRequestLogger(ctx)) -// -// The concept is fairly powerful and ensures that calls throughout the stack -// can be traced in log messages. Using the fields like "http.request.id", one -// can analyze call flow for a particular request with a simple grep of the -// logs. 
-package context diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go deleted file mode 100644 index 7fe9b8ab0..000000000 --- a/vendor/github.com/docker/distribution/context/http.go +++ /dev/null @@ -1,366 +0,0 @@ -package context - -import ( - "errors" - "net" - "net/http" - "strings" - "sync" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/uuid" - "github.com/gorilla/mux" -) - -// Common errors used with this package. -var ( - ErrNoRequestContext = errors.New("no http request in context") - ErrNoResponseWriterContext = errors.New("no http response in context") -) - -func parseIP(ipStr string) net.IP { - ip := net.ParseIP(ipStr) - if ip == nil { - log.Warnf("invalid remote IP address: %q", ipStr) - } - return ip -} - -// RemoteAddr extracts the remote address of the request, taking into -// account proxy headers. -func RemoteAddr(r *http.Request) string { - if prior := r.Header.Get("X-Forwarded-For"); prior != "" { - proxies := strings.Split(prior, ",") - if len(proxies) > 0 { - remoteAddr := strings.Trim(proxies[0], " ") - if parseIP(remoteAddr) != nil { - return remoteAddr - } - } - } - // X-Real-Ip is less supported, but worth checking in the - // absence of X-Forwarded-For - if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { - if parseIP(realIP) != nil { - return realIP - } - } - - return r.RemoteAddr -} - -// RemoteIP extracts the remote IP of the request, taking into -// account proxy headers. -func RemoteIP(r *http.Request) string { - addr := RemoteAddr(r) - - // Try parsing it as "IP:port" - if ip, _, err := net.SplitHostPort(addr); err == nil { - return ip - } - - return addr -} - -// WithRequest places the request on the context. The context of the request -// is assigned a unique id, available at "http.request.id". The request itself -// is available at "http.request". Other common attributes are available under -// the prefix "http.request.". If a request is already present on the context, -// this method will panic. -func WithRequest(ctx Context, r *http.Request) Context { - if ctx.Value("http.request") != nil { - // NOTE(stevvooe): This needs to be considered a programming error. It - // is unlikely that we'd want to have more than one request in - // context. - panic("only one request per context") - } - - return &httpRequestContext{ - Context: ctx, - startedAt: time.Now(), - id: uuid.Generate().String(), - r: r, - } -} - -// GetRequest returns the http request in the given context. Returns -// ErrNoRequestContext if the context does not have an http request associated -// with it. -func GetRequest(ctx Context) (*http.Request, error) { - if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { - return r, nil - } - return nil, ErrNoRequestContext -} - -// GetRequestID attempts to resolve the current request id, if possible. An -// error is return if it is not available on the context. -func GetRequestID(ctx Context) string { - return GetStringValue(ctx, "http.request.id") -} - -// WithResponseWriter returns a new context and response writer that makes -// interesting response statistics available within the context. 
-func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { - if closeNotifier, ok := w.(http.CloseNotifier); ok { - irwCN := &instrumentedResponseWriterCN{ - instrumentedResponseWriter: instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - }, - CloseNotifier: closeNotifier, - } - - return irwCN, irwCN - } - - irw := instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - } - return &irw, &irw -} - -// GetResponseWriter returns the http.ResponseWriter from the provided -// context. If not present, ErrNoResponseWriterContext is returned. The -// returned instance provides instrumentation in the context. -func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { - v := ctx.Value("http.response") - - rw, ok := v.(http.ResponseWriter) - if !ok || rw == nil { - return nil, ErrNoResponseWriterContext - } - - return rw, nil -} - -// getVarsFromRequest let's us change request vars implementation for testing -// and maybe future changes. -var getVarsFromRequest = mux.Vars - -// WithVars extracts gorilla/mux vars and makes them available on the returned -// context. Variables are available at keys with the prefix "vars.". For -// example, if looking for the variable "name", it can be accessed as -// "vars.name". Implementations that are accessing values need not know that -// the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx Context, r *http.Request) Context { - return &muxVarsContext{ - Context: ctx, - vars: getVarsFromRequest(r), - } -} - -// GetRequestLogger returns a logger that contains fields from the request in -// the current context. If the request is not available in the context, no -// fields will display. Request loggers can safely be pushed onto the context. -func GetRequestLogger(ctx Context) Logger { - return GetLogger(ctx, - "http.request.id", - "http.request.method", - "http.request.host", - "http.request.uri", - "http.request.referer", - "http.request.useragent", - "http.request.remoteaddr", - "http.request.contenttype") -} - -// GetResponseLogger reads the current response stats and builds a logger. -// Because the values are read at call time, pushing a logger returned from -// this function on the context will lead to missing or invalid data. Only -// call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx Context) Logger { - l := getLogrusLogger(ctx, - "http.response.written", - "http.response.status", - "http.response.contenttype") - - duration := Since(ctx, "http.request.startedat") - - if duration > 0 { - l = l.WithField("http.response.duration", duration.String()) - } - - return l -} - -// httpRequestContext makes information about a request available to context. -type httpRequestContext struct { - Context - - startedAt time.Time - id string - r *http.Request -} - -// Value returns a keyed element of the request for use in the context. To get -// the request itself, query "request". For other components, access them as -// "request.". 
For example, r.RequestURI -func (ctx *httpRequestContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.request" { - return ctx.r - } - - if !strings.HasPrefix(keyStr, "http.request.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - switch parts[2] { - case "uri": - return ctx.r.RequestURI - case "remoteaddr": - return RemoteAddr(ctx.r) - case "method": - return ctx.r.Method - case "host": - return ctx.r.Host - case "referer": - referer := ctx.r.Referer() - if referer != "" { - return referer - } - case "useragent": - return ctx.r.UserAgent() - case "id": - return ctx.id - case "startedat": - return ctx.startedAt - case "contenttype": - ct := ctx.r.Header.Get("Content-Type") - if ct != "" { - return ct - } - } - } - -fallback: - return ctx.Context.Value(key) -} - -type muxVarsContext struct { - Context - vars map[string]string -} - -func (ctx *muxVarsContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "vars" { - return ctx.vars - } - - if strings.HasPrefix(keyStr, "vars.") { - keyStr = strings.TrimPrefix(keyStr, "vars.") - } - - if v, ok := ctx.vars[keyStr]; ok { - return v - } - } - - return ctx.Context.Value(key) -} - -// instrumentedResponseWriterCN provides response writer information in a -// context. It implements http.CloseNotifier so that users can detect -// early disconnects. -type instrumentedResponseWriterCN struct { - instrumentedResponseWriter - http.CloseNotifier -} - -// instrumentedResponseWriter provides response writer information in a -// context. This variant is only used in the case where CloseNotifier is not -// implemented by the parent ResponseWriter. -type instrumentedResponseWriter struct { - http.ResponseWriter - Context - - mu sync.Mutex - status int - written int64 -} - -func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { - n, err = irw.ResponseWriter.Write(p) - - irw.mu.Lock() - irw.written += int64(n) - - // Guess the likely status if not set. 
- if irw.status == 0 { - irw.status = http.StatusOK - } - - irw.mu.Unlock() - - return -} - -func (irw *instrumentedResponseWriter) WriteHeader(status int) { - irw.ResponseWriter.WriteHeader(status) - - irw.mu.Lock() - irw.status = status - irw.mu.Unlock() -} - -func (irw *instrumentedResponseWriter) Flush() { - if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - - if !strings.HasPrefix(keyStr, "http.response.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - irw.mu.Lock() - defer irw.mu.Unlock() - - switch parts[2] { - case "written": - return irw.written - case "status": - return irw.status - case "contenttype": - contentType := irw.Header().Get("Content-Type") - if contentType != "" { - return contentType - } - } - } - -fallback: - return irw.Context.Value(key) -} - -func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - } - - return irw.instrumentedResponseWriter.Value(key) -} diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go deleted file mode 100644 index fbb6a0511..000000000 --- a/vendor/github.com/docker/distribution/context/logger.go +++ /dev/null @@ -1,116 +0,0 @@ -package context - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "runtime" -) - -// Logger provides a leveled-logging interface. -type Logger interface { - // standard logger methods - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) - - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - - Panic(args ...interface{}) - Panicf(format string, args ...interface{}) - Panicln(args ...interface{}) - - // Leveled methods, from logrus - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) - Debugln(args ...interface{}) - - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Errorln(args ...interface{}) - - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Infoln(args ...interface{}) - - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Warnln(args ...interface{}) -} - -// WithLogger creates a new context with provided logger. -func WithLogger(ctx Context, logger Logger) Context { - return WithValue(ctx, "logger", logger) -} - -// GetLoggerWithField returns a logger instance with the specified field key -// and value without affecting the context. Extra specified keys will be -// resolved from the context. -func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) -} - -// GetLoggerWithFields returns a logger instance with the specified fields -// without affecting the context. Extra specified keys will be resolved from -// the context. -func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { - // must convert from interface{} -> interface{} to string -> interface{} for logrus. 
- lfields := make(logrus.Fields, len(fields)) - for key, value := range fields { - lfields[fmt.Sprint(key)] = value - } - - return getLogrusLogger(ctx, keys...).WithFields(lfields) -} - -// GetLogger returns the logger from the current context, if present. If one -// or more keys are provided, they will be resolved on the context and -// included in the logger. While context.Value takes an interface, any key -// argument passed to GetLogger will be passed to fmt.Sprint when expanded as -// a logging key field. If context keys are integer constants, for example, -// its recommended that a String method is implemented. -func GetLogger(ctx Context, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...) -} - -// GetLogrusLogger returns the logrus logger for the context. If one more keys -// are provided, they will be resolved on the context and included in the -// logger. Only use this function if specific logrus functionality is -// required. -func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { - var logger *logrus.Entry - - // Get a logger, if it is present. - loggerInterface := ctx.Value("logger") - if loggerInterface != nil { - if lgr, ok := loggerInterface.(*logrus.Entry); ok { - logger = lgr - } - } - - if logger == nil { - fields := logrus.Fields{} - - // Fill in the instance id, if we have it. - instanceID := ctx.Value("instance.id") - if instanceID != nil { - fields["instance.id"] = instanceID - } - - fields["go.version"] = runtime.Version() - // If no logger is found, just return the standard logger. - logger = logrus.StandardLogger().WithFields(fields) - } - - fields := logrus.Fields{} - for _, key := range keys { - v := ctx.Value(key) - if v != nil { - fields[fmt.Sprint(key)] = v - } - } - - return logger.WithFields(fields) -} diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go deleted file mode 100644 index 721964a84..000000000 --- a/vendor/github.com/docker/distribution/context/trace.go +++ /dev/null @@ -1,104 +0,0 @@ -package context - -import ( - "runtime" - "time" - - "github.com/docker/distribution/uuid" -) - -// WithTrace allocates a traced timing span in a new context. This allows a -// caller to track the time between calling WithTrace and the returned done -// function. When the done function is called, a log message is emitted with a -// "trace.duration" field, corresponding to the elapsed time and a -// "trace.func" field, corresponding to the function that called WithTrace. -// -// The logging keys "trace.id" and "trace.parent.id" are provided to implement -// dapper-like tracing. This function should be complemented with a WithSpan -// method that could be used for tracing distributed RPC calls. -// -// The main benefit of this function is to post-process log messages or -// intercept them in a hook to provide timing data. Trace ids and parent ids -// can also be linked to provide call tracing, if so required. -// -// Here is an example of the usage: -// -// func timedOperation(ctx Context) { -// ctx, done := WithTrace(ctx) -// defer done("this will be the log message") -// // ... function body ... -// } -// -// If the function ran for roughly 1s, such a usage would emit a log message -// as follows: -// -// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... 
-// -// Notice that the function name is automatically resolved, along with the -// package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { - if ctx == nil { - ctx = Background() - } - - pc, file, line, _ := runtime.Caller(1) - f := runtime.FuncForPC(pc) - ctx = &traced{ - Context: ctx, - id: uuid.Generate().String(), - start: time.Now(), - parent: GetStringValue(ctx, "trace.id"), - fnname: f.Name(), - file: file, - line: line, - } - - return ctx, func(format string, a ...interface{}) { - GetLogger(ctx, - "trace.duration", - "trace.id", - "trace.parent.id", - "trace.func", - "trace.file", - "trace.line"). - Debugf(format, a...) - } -} - -// traced represents a context that is traced for function call timing. It -// also provides fast lookup for the various attributes that are available on -// the trace. -type traced struct { - Context - id string - parent string - start time.Time - fnname string - file string - line int -} - -func (ts *traced) Value(key interface{}) interface{} { - switch key { - case "trace.start": - return ts.start - case "trace.duration": - return time.Since(ts.start) - case "trace.id": - return ts.id - case "trace.parent.id": - if ts.parent == "" { - return nil // must return nil to signal no parent. - } - - return ts.parent - case "trace.func": - return ts.fnname - case "trace.file": - return ts.file - case "trace.line": - return ts.line - } - - return ts.Context.Value(key) -} diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go deleted file mode 100644 index cb9ef52e3..000000000 --- a/vendor/github.com/docker/distribution/context/util.go +++ /dev/null @@ -1,24 +0,0 @@ -package context - -import ( - "time" -) - -// Since looks up key, which should be a time.Time, and returns the duration -// since that time. If the key is not found, the value returned will be zero. -// This is helpful when inferring metrics related to context execution times. -func Since(ctx Context, key interface{}) time.Duration { - if startedAt, ok := ctx.Value(key).(time.Time); ok { - return time.Since(startedAt) - } - return 0 -} - -// GetStringValue returns a string value from the context. The empty string -// will be returned if not found. -func GetStringValue(ctx Context, key interface{}) (value string) { - if valuev, ok := ctx.Value(key).(string); ok { - value = valuev - } - return value -} diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go deleted file mode 100644 index 746cda02e..000000000 --- a/vendor/github.com/docker/distribution/context/version.go +++ /dev/null @@ -1,16 +0,0 @@ -package context - -// WithVersion stores the application version in the context. The new context -// gets a logger to ensure log messages are marked with the application -// version. -func WithVersion(ctx Context, version string) Context { - ctx = WithValue(ctx, "version", version) - // push a new logger onto the stack - return WithLogger(ctx, GetLogger(ctx, "version")) -} - -// GetVersion returns the application version from the context. An empty -// string may returned if the version was not set on the context. 
-func GetVersion(ctx Context) string { - return GetStringValue(ctx, "version") -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 6d9bb4b62..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. 
-func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. 
-func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr.(type) { - case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) - case Error: - err = daErr.(Error) - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index 49a64a86e..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,44 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. -func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. 
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index 9979abae6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1596 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", - Format: "", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } - - tooManyRequestsDescriptor = ResponseDescriptor{ - Name: "Too Many Requests", - StatusCode: http.StatusTooManyRequests, - Description: "The client made too many requests within a time interval.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeTooManyRequests, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. 
- Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. - Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. - Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. 
- Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. - Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde011959..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
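// Example (editorial sketch, not part of the vendored source): a client-side
// loop over the catalog pagination scheme described above, passing `n` for the
// page size and `last` for the final repository of the previous page. The
// registry address, page size, and short-page termination are assumptions of
// this sketch; real clients may follow the RFC5988 Link header instead.
// Requires encoding/json, net/http, net/url, and strconv.
func listAllRepositories(registry string, pageSize int) ([]string, error) {
	var all []string
	last := ""
	for {
		q := url.Values{"n": {strconv.Itoa(pageSize)}}
		if last != "" {
			q.Set("last", last) // resume lexically after the previous page
		}
		resp, err := http.Get(registry + "/v2/_catalog?" + q.Encode())
		if err != nil {
			return nil, err
		}
		var page struct {
			Repositories []string `json:"repositories"`
		}
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Repositories...)
		if len(page.Repositories) < pageSize {
			return all, nil // short page: no further results
		}
		last = page.Repositories[len(page.Repositories)-1]
	}
}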
-package v2 diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go deleted file mode 100644 index 97d6923aa..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. 
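// Example (editorial sketch, not part of the vendored source): a handler can
// attach structured detail to one of the codes registered in this file and let
// the errcode package choose the HTTP status and JSON envelope. WithDetail and
// ServeJSON exist in the vendored errcode package; treat their exact
// signatures as assumptions of this sketch.
func serveBlobUnknown(w http.ResponseWriter, dgst string) {
	err := ErrorCodeBlobUnknown.WithDetail(map[string]string{"digest": dgst})
	_ = errcode.ServeJSON(w, err)
}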
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go deleted file mode 100644 index 9bc41a3a6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go +++ /dev/null @@ -1,161 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" - "unicode" -) - -var ( - // according to rfc7230 - reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) - reQuotedValue = regexp.MustCompile(`^[^\\"]+`) - reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) -) - -// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains -// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The -// function parses only the first element of the list, which is set by the very first proxy. 
It returns a map -// of corresponding key-value pairs and an unparsed slice of the input string. -// -// Examples of Forwarded header values: -// -// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown -// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" -// -// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into -// {"for": "192.0.2.43:443", "host": "registry.example.org"}. -func parseForwardedHeader(forwarded string) (map[string]string, string, error) { - // Following are states of forwarded header parser. Any state could transition to a failure. - const ( - // terminating state; can transition to Parameter - stateElement = iota - // terminating state; can transition to KeyValueDelimiter - stateParameter - // can transition to Value - stateKeyValueDelimiter - // can transition to one of { QuotedValue, PairEnd } - stateValue - // can transition to one of { EscapedCharacter, PairEnd } - stateQuotedValue - // can transition to one of { QuotedValue } - stateEscapedCharacter - // terminating state; can transition to one of { Parameter, Element } - statePairEnd - ) - - var ( - parameter string - value string - parse = forwarded[:] - res = map[string]string{} - state = stateElement - ) - -Loop: - for { - // skip spaces unless in quoted value - if state != stateQuotedValue && state != stateEscapedCharacter { - parse = strings.TrimLeftFunc(parse, unicode.IsSpace) - } - - if len(parse) == 0 { - if state != stateElement && state != statePairEnd && state != stateParameter { - return nil, parse, fmt.Errorf("unexpected end of input") - } - // terminating - break - } - - switch state { - // terminate at list element delimiter - case stateElement: - if parse[0] == ',' { - parse = parse[1:] - break Loop - } - state = stateParameter - - // parse parameter (the key of key-value pair) - case stateParameter: - match := reToken.FindString(parse) - if len(match) == 0 { - return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) - } - parameter = strings.ToLower(match) - parse = parse[len(match):] - state = stateKeyValueDelimiter - - // parse '=' - case stateKeyValueDelimiter: - if parse[0] != '=' { - return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) - } - parse = parse[1:] - state = stateValue - - // parse value or quoted value - case stateValue: - if parse[0] == '"' { - parse = parse[1:] - state = stateQuotedValue - } else { - value = reToken.FindString(parse) - if len(value) == 0 { - return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) - } - if _, exists := res[parameter]; exists { - return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) - } - res[parameter] = value - parse = parse[len(value):] - value = "" - state = statePairEnd - } - - // parse a part of quoted value until the first backslash - case stateQuotedValue: - match := reQuotedValue.FindString(parse) - value += match - parse = parse[len(match):] - switch { - case len(parse) == 0: - return nil, parse, fmt.Errorf("unterminated quoted string") - case parse[0] == '"': - res[parameter] = value - value = "" - parse = parse[1:] - state = statePairEnd - case parse[0] == '\\': - parse = parse[1:] - state = stateEscapedCharacter - } - - // parse escaped character in a quoted string, ignore the backslash - // transition back to QuotedValue state - case 
stateEscapedCharacter: - c := reEscapedCharacter.FindString(parse) - if len(c) == 0 { - return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) - } - value += c - parse = parse[1:] - state = stateQuotedValue - - // expect either a new key-value pair, new list or end of input - case statePairEnd: - switch parse[0] { - case ';': - parse = parse[1:] - state = stateParameter - case ',': - state = stateElement - default: - return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) - } - } - } - - return res, parse, nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go deleted file mode 100644 index 5b80d5be7..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/routes.go +++ /dev/null @@ -1,49 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -var allEndpoints = []string{ - RouteNameManifest, - RouteNameCatalog, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go deleted file mode 100644 index e2e242eab..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ /dev/null @@ -1,314 +0,0 @@ -package v2 - -import ( - "net" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/docker/distribution/reference" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -// -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will be fall -// under "/foo/v2/...". Most application will only provide a schema, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router - relative bool -} - -// NewURLBuilder creates a URLBuilder with provided root url object. 
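// Example (editorial sketch, not part of the vendored source): matching an
// incoming request against the named routes built by Router above. Requires
// net/http/httptest in addition to the gorilla/mux import already present.
func routeNameFor(method, path string) string {
	router := Router()
	req := httptest.NewRequest(method, path, nil)
	var match mux.RouteMatch
	if router.Match(req, &match) && match.Route != nil {
		return match.Route.GetName() // e.g. "tags" for /v2/library/ubuntu/tags/list
	}
	return ""
}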
-func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - relative: relative, - } -} - -// NewURLBuilderFromString workes identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url. -func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u, relative), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. -func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var scheme string - - forwardedProto := r.Header.Get("X-Forwarded-Proto") - // TODO: log the error - forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded")) - - switch { - case len(forwardedProto) > 0: - scheme = forwardedProto - case len(forwardedHeader["proto"]) > 0: - scheme = forwardedHeader["proto"] - case r.TLS != nil: - scheme = "https" - case len(r.URL.Scheme) > 0: - scheme = r.URL.Scheme - default: - scheme = "http" - } - - host := r.Host - - if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. - hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } else if addr, exists := forwardedHeader["for"]; exists { - host = addr - } else if h, exists := forwardedHeader["host"]; exists { - host = h - } - - portLessHost, port := host, "" - if !isIPv6Address(portLessHost) { - // with go 1.6, this would treat the last part of IPv6 address as a port - portLessHost, port, _ = net.SplitHostPort(host) - } - if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 { - ports := strings.SplitN(forwardedPort, ",", 2) - forwardedPort = strings.TrimSpace(ports[0]) - if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil { - port = forwardedPort - } - } - - if len(portLessHost) > 0 { - host = portLessHost - } - if len(port) > 0 { - // remove enclosing brackets of ipv6 address otherwise they will be duplicated - if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - // JoinHostPort properly encloses ipv6 addresses in square brackets - host = net.JoinHostPort(host, port) - } else if isIPv6Address(host) && host[0] != '[' { - // ipv6 needs to be enclosed in square brackets in urls - host = "[" + host + "]" - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". 
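// Example (editorial sketch, not part of the vendored source): building API
// URLs from a fixed root with the methods defined below. The registry address
// and repository name are illustrative, and reference.ParseNamed is assumed to
// accept a plain repository name as vendored here. Requires fmt.
func exampleURLs() {
	ub, err := NewURLBuilderFromString("https://registry.example.org", false)
	if err != nil {
		panic(err)
	}
	named, err := reference.ParseNamed("library/ubuntu")
	if err != nil {
		panic(err)
	}
	base, _ := ub.BuildBaseURL()      // https://registry.example.org/v2/
	tags, _ := ub.BuildTagsURL(named) // .../v2/library/ubuntu/tags/list
	fmt.Println(base, tags)
}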
-func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// clondedRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. -func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) 
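// Editorial note on the method continuing below: relative route URLs are
// returned as-is; otherwise a leading slash is stripped from schemeless,
// hostless URLs and the result is resolved against the cloned root, forcing
// the root's scheme.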
- if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} - -// isIPv6Address returns true if given string is a valid IPv6 address. No port is allowed. The address may be -// enclosed in square brackets. -func isIPv6Address(host string) bool { - if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - // The IPv6 scoped addressing zone identifier starts after the last percent sign. - if i := strings.LastIndexByte(host, '%'); i > 0 { - host = host[:i] - } - ip := net.ParseIP(host) - if ip == nil { - return false - } - if ip.To16() == nil { - return false - } - if ip.To4() == nil { - return true - } - // dot can be present in ipv4-mapped address, it needs to come after a colon though - i := strings.IndexAny(host, ":.") - return i >= 0 && host[i] == ':' -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go deleted file mode 100644 index 7d8f1d957..000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. -type APIVersion struct { - // Type refers to the name of a specific API specification - // such as "registry" - Type string - - // Version is the version of the API specification implemented, - // This may omit the revision number and only include - // the major and minor version, such as "2.0" - Version string -} - -// String returns the string formatted API Version -func (v APIVersion) String() string { - return v.Type + "/" + v.Version -} - -// APIVersions gets the API versions out of an HTTP response using the provided -// version header as the key for the HTTP header. -func APIVersions(resp *http.Response, versionHeader string) []APIVersion { - versions := []APIVersion{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { - versions = append(versions, ParseAPIVersion(version)) - } - } - } - return versions -} - -// ParseAPIVersion parses an API version string into an APIVersion -// Format (Expected, not enforced): -// API version string = '/' -// API type = [a-z][a-z0-9]* -// API version = [0-9]+(\.[0-9]+)? 
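// For example, "registry/2.0" parses to APIVersion{Type: "registry", Version: "2.0"};
// input without a '/' yields Type "unknown".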
-// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
-func ParseAPIVersion(versionStr string) APIVersion {
-	idx := strings.IndexRune(versionStr, '/')
-	if idx == -1 {
-		return APIVersion{
-			Type:    "unknown",
-			Version: versionStr,
-		}
-	}
-	return APIVersion{
-		Type:    strings.ToLower(versionStr[:idx]),
-		Version: versionStr[idx+1:],
-	}
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
deleted file mode 100644
index 2c3ebe165..000000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package challenge
-
-import (
-	"net/url"
-	"strings"
-)
-
-// FROM: https://golang.org/src/net/http/http.go
-// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
-// return true if the string includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-// FROM: http://golang.org/src/net/http/transport.go
-var portMap = map[string]string{
-	"http":  "80",
-	"https": "443",
-}
-
-// canonicalAddr returns url.Host but always with a ":port" suffix
-// FROM: http://golang.org/src/net/http/transport.go
-func canonicalAddr(url *url.URL) string {
-	addr := url.Host
-	if !hasPort(addr) {
-		return addr + ":" + portMap[url.Scheme]
-	}
-	return addr
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
deleted file mode 100644
index c9bdfc355..000000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package challenge
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-	"sync"
-)
-
-// Challenge carries information from a WWW-Authenticate response header.
-// See RFC 2617.
-type Challenge struct {
-	// Scheme is the auth-scheme according to RFC 2617
-	Scheme string
-
-	// Parameters are the auth-params according to RFC 2617
-	Parameters map[string]string
-}
-
-// Manager manages the challenges for endpoints.
-// The challenges are pulled out of HTTP responses. Only
-// responses which expect challenges should be added to
-// the manager, since a non-unauthorized request will be
-// viewed as not requiring challenges.
-type Manager interface {
-	// GetChallenges returns the challenges for the given
-	// endpoint URL.
-	GetChallenges(endpoint url.URL) ([]Challenge, error)
-
-	// AddResponse adds the response to the challenge
-	// manager. The challenges will be parsed out of
-	// the WWW-Authenticate headers and added to the
-	// URL which produced the response. If the
-	// response was authorized, any challenges for the
-	// endpoint will be cleared.
-	AddResponse(resp *http.Response) error
-}
-
-// NewSimpleManager returns an instance of
-// Manager which only maps endpoints to challenges
-// based on the responses which have been added to the
-// manager. The simple manager will make no attempt to
-// perform requests on the endpoints or cache the responses
-// to a backend.
-func NewSimpleManager() Manager {
-	return &simpleManager{
-		Challenges: make(map[string][]Challenge),
-	}
-}
-
-type simpleManager struct {
-	sync.RWMutex
-	Challenges map[string][]Challenge
-}
-
-func normalizeURL(endpoint *url.URL) {
-	endpoint.Host = strings.ToLower(endpoint.Host)
-	endpoint.Host = canonicalAddr(endpoint)
-}
-
-func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
-	normalizeURL(&endpoint)
-
-	m.RLock()
-	defer m.RUnlock()
-	challenges := m.Challenges[endpoint.String()]
-	return challenges, nil
-}
-
-func (m *simpleManager) AddResponse(resp *http.Response) error {
-	challenges := ResponseChallenges(resp)
-	if resp.Request == nil {
-		return fmt.Errorf("missing request reference")
-	}
-	urlCopy := url.URL{
-		Path:   resp.Request.URL.Path,
-		Host:   resp.Request.URL.Host,
-		Scheme: resp.Request.URL.Scheme,
-	}
-	normalizeURL(&urlCopy)
-
-	m.Lock()
-	defer m.Unlock()
-	m.Challenges[urlCopy.String()] = challenges
-	return nil
-}
-
-// Octet types from RFC 2616.
-type octetType byte
-
-var octetTypes [256]octetType
-
-const (
-	isToken octetType = 1 << iota
-	isSpace
-)
-
-func init() {
-	// OCTET      = <any 8-bit sequence of data>
-	// CHAR       = <any US-ASCII character (octets 0 - 127)>
-	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-	// CR         = <US-ASCII CR, carriage return (13)>
-	// LF         = <US-ASCII LF, linefeed (10)>
-	// SP         = <US-ASCII SP, space (32)>
-	// HT         = <US-ASCII HT, horizontal-tab (9)>
-	// <">        = <US-ASCII double-quote mark (34)>
-	// CRLF       = CR LF
-	// LWS        = [CRLF] 1*( SP | HT )
-	// TEXT       = <any OCTET except CTLs, but including LWS>
-	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-	// token      = 1*<any CHAR except CTLs or separators>
-	// qdtext     = <any TEXT except <">>
-
-	for c := 0; c < 256; c++ {
-		var t octetType
-		isCtl := c <= 31 || c == 127
-		isChar := 0 <= c && c <= 127
-		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
-		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
-			t |= isSpace
-		}
-		if isChar && !isCtl && !isSeparator {
-			t |= isToken
-		}
-		octetTypes[c] = t
-	}
-}
-
-// ResponseChallenges returns a list of authorization challenges
-// for the given http Response. Challenges are only checked if
-// the response status code was a 401.
-func ResponseChallenges(resp *http.Response) []Challenge {
-	if resp.StatusCode == http.StatusUnauthorized {
-		// Parse the WWW-Authenticate Header and store the challenges
-		// on this endpoint object.
-		return parseAuthHeader(resp.Header)
-	}
-
-	return nil
-}
-
-func parseAuthHeader(header http.Header) []Challenge {
-	challenges := []Challenge{}
-	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
-		v, p := parseValueAndParams(h)
-		if v != "" {
-			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
-		}
-	}
-	return challenges
-}
-
-func parseValueAndParams(header string) (value string, params map[string]string) {
-	params = make(map[string]string)
-	value, s := expectToken(header)
-	if value == "" {
-		return
-	}
-	value = strings.ToLower(value)
-	s = "," + skipSpace(s)
-	for strings.HasPrefix(s, ",") {
-		var pkey string
-		pkey, s = expectToken(skipSpace(s[1:]))
-		if pkey == "" {
-			return
-		}
-		if !strings.HasPrefix(s, "=") {
-			return
-		}
-		var pvalue string
-		pvalue, s = expectTokenOrQuoted(s[1:])
-		if pvalue == "" {
-			return
-		}
-		pkey = strings.ToLower(pkey)
-		params[pkey] = pvalue
-		s = skipSpace(s)
-	}
-	return
-}
-
-func skipSpace(s string) (rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isSpace == 0 {
-			break
-		}
-	}
-	return s[i:]
-}
-
-func expectToken(s string) (token, rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isToken == 0 {
-			break
-		}
-	}
-	return s[:i], s[i:]
-}
-
-func expectTokenOrQuoted(s string) (value string, rest string) {
-	if !strings.HasPrefix(s, "\"") {
-		return expectToken(s)
-	}
-	s = s[1:]
-	for i := 0; i < len(s); i++ {
-		switch s[i] {
-		case '"':
-			return s[:i], s[i+1:]
-		case '\\':
-			p := make([]byte, len(s)-1)
-			j := copy(p, s[:i])
-			escape := true
-			for i = i + 1; i < len(s); i++ {
-				b := s[i]
-				switch {
-				case escape:
-					escape = false
-					p[j] = b
-					j++
-				case b == '\\':
-					escape = true
-				case b == '"':
-					return string(p[:j]), s[i+1:]
-				default:
-					p[j] = b
-					j++
-				}
-			}
-			return "", ""
-		}
-	}
-	return "", ""
}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go
deleted file mode 100644
index d6d884ffd..000000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/session.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package auth
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/registry/client"
-	"github.com/docker/distribution/registry/client/auth/challenge"
-	"github.com/docker/distribution/registry/client/transport"
-)
-
-var (
-	// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
-	// basic auth due to lack of credentials.
-	ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
-
-	// ErrNoToken is returned if a request is successful but the body does not
-	// contain an authorization token.
-	ErrNoToken = errors.New("authorization server did not include a token in the response")
-)
-
-const defaultClientID = "registry-client"
-
-// AuthenticationHandler is an interface for authorizing a request using
-// params from a "WWW-Authenticate" header for a single scheme.
-type AuthenticationHandler interface {
-	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
-	Scheme() string
-
-	// AuthorizeRequest adds the authorization header to a request (if needed)
-	// using the parameters from the "WWW-Authenticate" method. The parameter
-	// values depend on the scheme.
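// A hedged sketch of what parseValueAndParams above yields for a typical
// registry challenge; the header value is illustrative, not vendored.
func exampleParseChallenge() {
	scheme, params := parseValueAndParams(`Bearer realm="https://auth.example.com/token",service="registry.example.com"`)
	_ = scheme            // "bearer" (schemes are lower-cased)
	_ = params["realm"]   // "https://auth.example.com/token"
	_ = params["service"] // "registry.example.com"
}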
- AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). -func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges challenge.Manager - handlers []AuthenticationHandler - transport http.RoundTripper -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - pingPath := req.URL.Path - if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { - pingPath = pingPath[:v2Root+4] - } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { - pingPath = pingPath[:v1Root] + "/v2/" - } else { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: pingPath, - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, c := range challenges { - if c.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation. -type clock interface { - Now() time.Time -} - -type tokenHandler struct { - header http.Header - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time -} - -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Class string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - repoType := "repository" - if rs.Class != "" { - repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) - } - return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) -} - -// RegistryScope represents a token scope for access -// to resources in the registry. 
-type RegistryScope struct { - Name string - Actions []string -} - -// String returns the string representation of the user -// using the scope grammar -func (rs RegistryScope) String() string { - return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) -} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. -func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...) 
- if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
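// The refresh rule implemented by getToken above, restated as a small helper
// for clarity; an editorial sketch, not part of the vendored client.
func mustRefetch(now, expiration time.Time, addedScopes bool) bool {
	// A cached token is reused only while it is unexpired and the request
	// did not widen the scope list.
	return now.After(expiration) || addedScopes
}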
-		tr.IssuedAt = th.clock.Now().UTC()
-	}
-
-	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
-}
-
-type getTokenResponse struct {
-	Token        string    `json:"token"`
-	AccessToken  string    `json:"access_token"`
-	ExpiresIn    int       `json:"expires_in"`
-	IssuedAt     time.Time `json:"issued_at"`
-	RefreshToken string    `json:"refresh_token"`
-}
-
-func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
-
-	req, err := http.NewRequest("GET", realm.String(), nil)
-	if err != nil {
-		return "", time.Time{}, err
-	}
-
-	reqParams := req.URL.Query()
-
-	if service != "" {
-		reqParams.Add("service", service)
-	}
-
-	for _, scope := range scopes {
-		reqParams.Add("scope", scope)
-	}
-
-	if th.offlineAccess {
-		reqParams.Add("offline_token", "true")
-		clientID := th.clientID
-		if clientID == "" {
-			clientID = defaultClientID
-		}
-		reqParams.Add("client_id", clientID)
-	}
-
-	if th.creds != nil {
-		username, password := th.creds.Basic(realm)
-		if username != "" && password != "" {
-			reqParams.Add("account", username)
-			req.SetBasicAuth(username, password)
-		}
-	}
-
-	req.URL.RawQuery = reqParams.Encode()
-
-	resp, err := th.client().Do(req)
-	if err != nil {
-		return "", time.Time{}, err
-	}
-	defer resp.Body.Close()
-
-	if !client.SuccessStatus(resp.StatusCode) {
-		err := client.HandleErrorResponse(resp)
-		return "", time.Time{}, err
-	}
-
-	decoder := json.NewDecoder(resp.Body)
-
-	var tr getTokenResponse
-	if err = decoder.Decode(&tr); err != nil {
-		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
-	}
-
-	if tr.RefreshToken != "" && th.creds != nil {
-		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
-	}
-
-	// `access_token` is equivalent to `token` and if both are specified
-	// the choice is undefined. Canonicalize `access_token` by sticking
-	// things in `token`.
-	if tr.AccessToken != "" {
-		tr.Token = tr.AccessToken
-	}
-
-	if tr.Token == "" {
-		return "", time.Time{}, ErrNoToken
-	}
-
-	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
-		// The default/minimum lifetime.
-		tr.ExpiresIn = minimumTokenLifetimeSeconds
-		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
-	}
-
-	if tr.IssuedAt.IsZero() {
-		// issued_at is optional in the token response.
-		tr.IssuedAt = th.clock.Now().UTC()
-	}
-
-	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
-}
-
-func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
-	realm, ok := params["realm"]
-	if !ok {
-		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
-	}
-
-	// TODO(dmcgowan): Handle empty scheme and relative realm
-	realmURL, err := url.Parse(realm)
-	if err != nil {
-		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
-	}
-
-	service := params["service"]
-
-	var refreshToken string
-
-	if th.creds != nil {
-		refreshToken = th.creds.RefreshToken(realmURL, service)
-	}
-
-	if refreshToken != "" || th.forceOAuth {
-		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
-	}
-
-	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
-}
-
-type basicHandler struct {
-	creds CredentialStore
-}
-
-// NewBasicHandler creates a new authentication handler which adds
-// basic authentication credentials to a request.
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index e3ffcb00f..000000000 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func (hbu 
*httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index 52d49d5d2..000000000 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,139 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. 
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) -} - -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - - return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) - } - } - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). 
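// Editorial sketch of the bearer-challenge mapping HandleErrorResponse above
// performs, assuming the errcode constants the vendored code references.
func bearerErrorCode(errParam string) (errcode.ErrorCode, bool) {
	switch errParam {
	case "invalid_token":
		return errcode.ErrorCodeUnauthorized, true
	case "insufficient_scope":
		return errcode.ErrorCodeDenied, true
	default:
		// Unrecognized values fall through to plain body parsing.
		return 0, false
	}
}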
-func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index 1ebd0b183..000000000 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,853 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. - hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return ®istry{ - client: client, - ub: ub, - context: ctx, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder - context context.Context -} - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - context: ctx, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - context: r.context, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - u, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(u) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
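// An illustrative drain loop for the Repositories method above: keep passing
// the last entry seen until the call reports io.EOF. The page size is an
// arbitrary assumption and error handling is deliberately minimal.
func allRepositories(ctx context.Context, r Registry) ([]string, error) {
	var all []string
	entries := make([]string, 100)
	last := ""
	for {
		n, err := r.Repositories(ctx, entries, last)
		all = append(all, entries[:n]...)
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		if n == 0 {
			return all, nil // defensive stop, not expected from the API
		}
		last = entries[n-1]
	}
}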
- if link := resp.Header.Get("Link"); link != "" { - u = strings.Trim(strings.Split(link, ";")[0], "<>") - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.ParseDigest(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - newRequest := func(method string) (*http.Response, error) { - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - resp, err := t.client.Do(req) - return resp, err - } - - resp, err := newRequest("HEAD") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - return descriptorFromResponse(resp) - default: - // if the response is an error - there will be no body to decode. 
-		// Issue a GET request:
-		//   - for data from a server that does not handle HEAD
-		//   - to get error details in case of a failure
-		resp, err = newRequest("GET")
-		if err != nil {
-			return distribution.Descriptor{}, err
-		}
-		defer resp.Body.Close()
-
-		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
-			return descriptorFromResponse(resp)
-		}
-		return distribution.Descriptor{}, HandleErrorResponse(resp)
-	}
-}
-
-func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
-	panic("not implemented")
-}
-
-func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
-	panic("not implemented")
-}
-
-func (t *tags) Untag(ctx context.Context, tag string) error {
-	panic("not implemented")
-}
-
-type manifests struct {
-	name   reference.Named
-	ub     *v2.URLBuilder
-	client *http.Client
-	etags  map[string]string
-}
-
-func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
-	ref, err := reference.WithDigest(ms.name, dgst)
-	if err != nil {
-		return false, err
-	}
-	u, err := ms.ub.BuildManifestURL(ref)
-	if err != nil {
-		return false, err
-	}
-
-	resp, err := ms.client.Head(u)
-	if err != nil {
-		return false, err
-	}
-
-	if SuccessStatus(resp.StatusCode) {
-		return true, nil
-	} else if resp.StatusCode == http.StatusNotFound {
-		return false, nil
-	}
-	return false, HandleErrorResponse(resp)
-}
-
-// AddEtagToTag allows a client to supply an eTag to Get which will be
-// used for a conditional HTTP request. If the eTag matches, a nil manifest
-// and ErrManifestNotModified error will be returned. etag is automatically
-// quoted when added to this map.
-func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
-	return etagOption{tag, etag}
-}
-
-type etagOption struct{ tag, etag string }
-
-func (o etagOption) Apply(ms distribution.ManifestService) error {
-	if ms, ok := ms.(*manifests); ok {
-		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
-		return nil
-	}
-	return fmt.Errorf("etag option is a client-only option")
-}
-
-// ReturnContentDigest allows a client to set the content digest on
-// a successful request from the 'Docker-Content-Digest' header. The
-// returned digest represents the digest which the registry uses
-// to refer to the content and can be used to delete the content.
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - ) - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - } else if opt, ok := option.(contentDigestOption); ok { - contentDgst = opt.digest - } else { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
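// A minimal sketch of the untagged path described above: without a tag option,
// Put derives a canonical digest from the manifest payload and pushes by digest.
// This helper is editorial, not part of the vendored client.
func canonicalManifestRef(name reference.Named, m distribution.Manifest) (reference.Canonical, error) {
	mediaType, payload, err := m.Payload()
	if err != nil {
		return nil, err
	}
	_, desc, err := distribution.UnmarshalManifest(mediaType, payload)
	if err != nil {
		return nil, err
	}
	return reference.WithDigest(name, desc.Digest)
}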
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.ParseDigest(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.New() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index e5ff09d75..000000000 --- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,251 +0,0 @@ -package transport - 
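// Hedged sketch of the cross-repository mount flow that blobs.Create handles
// above: a 201 means the registry mounted the blob directly and surfaces
// distribution.ErrBlobMounted instead of a writer. Names are illustrative.
func mountOrUpload(ctx context.Context, bs distribution.BlobStore, src reference.Canonical) (distribution.BlobWriter, error) {
	w, err := bs.Create(ctx, WithMountFrom(src))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		_ = ebm.Descriptor // blob already present in the target repository
		return nil, nil
	}
	return w, err
}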
-import (
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"os"
-	"regexp"
-	"strconv"
-)
-
-var (
-	contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
-
-	// ErrWrongCodeForByteRange is returned if the client sends a request
-	// with a Range header but the server returns a 2xx or 3xx code other
-	// than 206 Partial Content.
-	ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
-)
-
-// ReadSeekCloser combines io.ReadSeeker with io.Closer.
-type ReadSeekCloser interface {
-	io.ReadSeeker
-	io.Closer
-}
-
-// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
-// request. When seeking and starting a read from a non-zero offset,
-// a "Range" header will be added which sets the offset.
-// TODO(dmcgowan): Move this into a separate utility package
-func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
-	return &httpReadSeeker{
-		client:       client,
-		url:          url,
-		errorHandler: errorHandler,
-	}
-}
-
-type httpReadSeeker struct {
-	client *http.Client
-	url    string
-
-	// errorHandler creates an error from an unsuccessful HTTP response.
-	// This allows the error to be created with the HTTP response body
-	// without leaking the body through a returned error.
-	errorHandler func(*http.Response) error
-
-	size int64
-
-	// rc is the remote read closer.
-	rc io.ReadCloser
-	// readerOffset tracks the offset as of the last read.
-	readerOffset int64
-	// seekOffset allows Seek to override the offset. Seek changes
-	// seekOffset instead of changing readerOffset directly so that
-	// connection resets can be delayed and possibly avoided if the
-	// seek is undone (i.e. seeking to the end and then back to the
-	// beginning).
-	seekOffset int64
-	err        error
-}
-
-func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
-	if hrs.err != nil {
-		return 0, hrs.err
-	}
-
-	// If we sought to a different position, we need to reset the
-	// connection. This logic is here instead of Seek so that if
-	// a seek is undone before the next read, the connection doesn't
-	// need to be closed and reopened. A common example of this is
-	// seeking to the end to determine the length, and then seeking
-	// back to the original position.
-	if hrs.readerOffset != hrs.seekOffset {
-		hrs.reset()
-	}
-
-	hrs.readerOffset = hrs.seekOffset
-
-	rd, err := hrs.reader()
-	if err != nil {
-		return 0, err
-	}
-
-	n, err = rd.Read(p)
-	hrs.seekOffset += int64(n)
-	hrs.readerOffset += int64(n)
-
-	return n, err
-}
-
-func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
-	if hrs.err != nil {
-		return 0, hrs.err
-	}
-
-	lastReaderOffset := hrs.readerOffset
-
-	if whence == os.SEEK_SET && hrs.rc == nil {
-		// If no request has been made yet, and we are seeking to an
-		// absolute position, set the read offset as well to avoid an
-		// unnecessary request.
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case os.SEEK_CUR: - newOffset += offset - case os.SEEK_END: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case os.SEEK_SET: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - req.Header.Add("Accept-Encoding", "identity") - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git 
deleted file mode 100644
index 30e45fab0..000000000
--- a/vendor/github.com/docker/distribution/registry/client/transport/transport.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package transport
-
-import (
-    "io"
-    "net/http"
-    "sync"
-)
-
-// RequestModifier represents an object which will do an in-place
-// modification of an HTTP request.
-type RequestModifier interface {
-    ModifyRequest(*http.Request) error
-}
-
-type headerModifier http.Header
-
-// NewHeaderRequestModifier returns a new RequestModifier which will
-// add the given headers to a request.
-func NewHeaderRequestModifier(header http.Header) RequestModifier {
-    return headerModifier(header)
-}
-
-func (h headerModifier) ModifyRequest(req *http.Request) error {
-    for k, s := range http.Header(h) {
-        req.Header[k] = append(req.Header[k], s...)
-    }
-
-    return nil
-}
-
-// NewTransport creates a new transport which will apply modifiers to
-// the request on a RoundTrip call.
-func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
-    return &transport{
-        Modifiers: modifiers,
-        Base:      base,
-    }
-}
-
-// transport is an http.RoundTripper that makes HTTP requests after
-// copying and modifying the request
-type transport struct {
-    Modifiers []RequestModifier
-    Base      http.RoundTripper
-
-    mu     sync.Mutex                      // guards modReq
-    modReq map[*http.Request]*http.Request // original -> modified
-}
-
-// RoundTrip applies the request modifiers to a copy of the request
-// and passes it to the base RoundTripper.
-func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
-    req2 := cloneRequest(req)
-    for _, modifier := range t.Modifiers {
-        if err := modifier.ModifyRequest(req2); err != nil {
-            return nil, err
-        }
-    }
-
-    t.setModReq(req, req2)
-    res, err := t.base().RoundTrip(req2)
-    if err != nil {
-        t.setModReq(req, nil)
-        return nil, err
-    }
-    res.Body = &onEOFReader{
-        rc: res.Body,
-        fn: func() { t.setModReq(req, nil) },
-    }
-    return res, nil
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *transport) CancelRequest(req *http.Request) {
-    type canceler interface {
-        CancelRequest(*http.Request)
-    }
-    if cr, ok := t.base().(canceler); ok {
-        t.mu.Lock()
-        modReq := t.modReq[req]
-        delete(t.modReq, req)
-        t.mu.Unlock()
-        cr.CancelRequest(modReq)
-    }
-}
-
-func (t *transport) base() http.RoundTripper {
-    if t.Base != nil {
-        return t.Base
-    }
-    return http.DefaultTransport
-}
-
-func (t *transport) setModReq(orig, mod *http.Request) {
-    t.mu.Lock()
-    defer t.mu.Unlock()
-    if t.modReq == nil {
-        t.modReq = make(map[*http.Request]*http.Request)
-    }
-    if mod == nil {
-        delete(t.modReq, orig)
-    } else {
-        t.modReq[orig] = mod
-    }
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
-    // shallow copy of the struct
-    r2 := new(http.Request)
-    *r2 = *r
-    // deep copy of the Header
-    r2.Header = make(http.Header, len(r.Header))
-    for k, s := range r.Header {
-        r2.Header[k] = append([]string(nil), s...)
-    }
-
-    return r2
-}
-
-type onEOFReader struct {
-    rc io.ReadCloser
-    fn func()
-}
-
-func (r *onEOFReader) Read(p []byte) (n int, err error) {
-    n, err = r.rc.Read(p)
-    if err == io.EOF {
-        r.runFunc()
-    }
-    return
-}
-
-func (r *onEOFReader) Close() error {
-    err := r.rc.Close()
-    r.runFunc()
-    return err
-}
-
-func (r *onEOFReader) runFunc() {
-    if fn := r.fn; fn != nil {
-        fn()
-        r.fn = nil
-    }
-}
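A short sketch of how the removed transport is composed: static headers are attached through `NewHeaderRequestModifier`, and every request is cloned before modification so the caller's request is never mutated. The registry URL and token are placeholders:

```go
package main

import (
	"fmt"
	"net/http"

	// Assumed upstream home of the vendored package deleted above.
	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	// Placeholder credentials; every request sent through this client
	// gets the extra headers applied to a clone of the request.
	hdr := http.Header{}
	hdr.Set("Authorization", "Bearer <token>")
	hdr.Set("User-Agent", "traefik")

	rt := transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(hdr))
	client := &http.Client{Transport: rt}

	resp, err := client.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```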
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
deleted file mode 100644
index 10a390919..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package cache provides facilities to speed up access to the storage
-// backend.
-package cache
-
-import (
-    "fmt"
-
-    "github.com/docker/distribution"
-)
-
-// BlobDescriptorCacheProvider provides repository scoped
-// BlobDescriptorService cache instances and a global descriptor cache.
-type BlobDescriptorCacheProvider interface {
-    distribution.BlobDescriptorService
-
-    RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
-}
-
-// ValidateDescriptor provides a helper function to ensure that caches have
-// common criteria for admitting descriptors.
-func ValidateDescriptor(desc distribution.Descriptor) error {
-    if err := desc.Digest.Validate(); err != nil {
-        return err
-    }
-
-    if desc.Size < 0 {
-        return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
-    }
-
-    if desc.MediaType == "" {
-        return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
-    }
-
-    return nil
-}
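The admission criteria above are easy to exercise in isolation. A sketch, assuming the upstream import paths; the digest is the well-known SHA-256 of empty input:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/storage/cache"
)

func main() {
	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      0, // zero is allowed; only negative sizes are rejected
		Digest:    digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
	}

	// Passes: the digest parses, the size is non-negative and the
	// media type is set.
	if err := cache.ValidateDescriptor(desc); err != nil {
		panic(err)
	}

	// Fails: an empty media type is rejected.
	desc.MediaType = ""
	fmt.Println(cache.ValidateDescriptor(desc))
}
```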
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
deleted file mode 100644
index 94ca8a90c..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package cache
-
-import (
-    "github.com/docker/distribution/context"
-    "github.com/docker/distribution/digest"
-
-    "github.com/docker/distribution"
-)
-
-// Metrics is used to hold metric counters
-// related to the number of times a cache was
-// hit or missed.
-type Metrics struct {
-    Requests uint64
-    Hits     uint64
-    Misses   uint64
-}
-
-// MetricsTracker represents a metric tracker
-// which simply counts the number of hits and misses.
-type MetricsTracker interface {
-    Hit()
-    Miss()
-    Metrics() Metrics
-}
-
-type cachedBlobStatter struct {
-    cache   distribution.BlobDescriptorService
-    backend distribution.BlobDescriptorService
-    tracker MetricsTracker
-}
-
-// NewCachedBlobStatter creates a new statter which prefers a cache and
-// falls back to a backend.
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
-    return &cachedBlobStatter{
-        cache:   cache,
-        backend: backend,
-    }
-}
-
-// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
-// falls back to a backend. Hits and misses will be sent to the tracker.
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
-    return &cachedBlobStatter{
-        cache:   cache,
-        backend: backend,
-        tracker: tracker,
-    }
-}
-
-func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-    desc, err := cbds.cache.Stat(ctx, dgst)
-    if err != nil {
-        if err != distribution.ErrBlobUnknown {
-            context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
-        }
-
-        goto fallback
-    }
-
-    if cbds.tracker != nil {
-        cbds.tracker.Hit()
-    }
-    return desc, nil
-fallback:
-    if cbds.tracker != nil {
-        cbds.tracker.Miss()
-    }
-    desc, err = cbds.backend.Stat(ctx, dgst)
-    if err != nil {
-        return desc, err
-    }
-
-    if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
-        context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
-    }
-
-    return desc, err
-}
-
-func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
-    err := cbds.cache.Clear(ctx, dgst)
-    if err != nil {
-        return err
-    }
-
-    err = cbds.backend.Clear(ctx, dgst)
-    if err != nil {
-        return err
-    }
-    return nil
-}
-
-func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
-    if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
-        context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
-    }
-    return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
deleted file mode 100644
index cf125e187..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package memory
-
-import (
-    "sync"
-
-    "github.com/docker/distribution"
-    "github.com/docker/distribution/context"
-    "github.com/docker/distribution/digest"
-    "github.com/docker/distribution/reference"
-    "github.com/docker/distribution/registry/storage/cache"
-)
-
-type inMemoryBlobDescriptorCacheProvider struct {
-    global       *mapBlobDescriptorCache
-    repositories map[string]*mapBlobDescriptorCache
-    mu           sync.RWMutex
-}
-
-// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
-// storing blob descriptor data.
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
-    return &inMemoryBlobDescriptorCacheProvider{
-        global:       newMapBlobDescriptorCache(),
-        repositories: make(map[string]*mapBlobDescriptorCache),
-    }
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
-    if _, err := reference.ParseNamed(repo); err != nil {
-        return nil, err
-    }
-
-    imbdcp.mu.RLock()
-    defer imbdcp.mu.RUnlock()
-
-    return &repositoryScopedInMemoryBlobDescriptorCache{
-        repo:       repo,
-        parent:     imbdcp,
-        repository: imbdcp.repositories[repo],
-    }, nil
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-    return imbdcp.global.Stat(ctx, dgst)
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
-    return imbdcp.global.Clear(ctx, dgst)
-}
-
-func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
-    _, err := imbdcp.Stat(ctx, dgst)
-    if err == distribution.ErrBlobUnknown {
-
-        if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
-            // if the digests differ, set the other canonical mapping
-            if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
-                return err
-            }
-        }
-
-        // unknown, just set it
-        return imbdcp.global.SetDescriptor(ctx, dgst, desc)
-    }
-
-    // we already know it, do nothing
-    return err
-}
-
-// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
-// repository cache. Instances are not thread-safe but the delegated
-// operations are.
-type repositoryScopedInMemoryBlobDescriptorCache struct {
-    repo       string
-    parent     *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
-    repository *mapBlobDescriptorCache
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-    rsimbdcp.parent.mu.Lock()
-    repo := rsimbdcp.repository
-    rsimbdcp.parent.mu.Unlock()
-
-    if repo == nil {
-        return distribution.Descriptor{}, distribution.ErrBlobUnknown
-    }
-
-    return repo.Stat(ctx, dgst)
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
-    rsimbdcp.parent.mu.Lock()
-    repo := rsimbdcp.repository
-    rsimbdcp.parent.mu.Unlock()
-
-    if repo == nil {
-        return distribution.ErrBlobUnknown
-    }
-
-    return repo.Clear(ctx, dgst)
-}
-
-func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
-    rsimbdcp.parent.mu.Lock()
-    repo := rsimbdcp.repository
-    if repo == nil {
-        // allocate map since we are setting it now.
-        var ok bool
-        // have to read back value since we may have allocated elsewhere.
-        repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
-        if !ok {
-            repo = newMapBlobDescriptorCache()
-            rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
-        }
-        rsimbdcp.repository = repo
-    }
-    rsimbdcp.parent.mu.Unlock()
-
-    if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
-        return err
-    }
-
-    return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
-}
-
-// mapBlobDescriptorCache provides a simple map-based implementation of the
-// descriptor cache.
-type mapBlobDescriptorCache struct {
-    descriptors map[digest.Digest]distribution.Descriptor
-    mu          sync.RWMutex
-}
-
-var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
-
-func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
-    return &mapBlobDescriptorCache{
-        descriptors: make(map[digest.Digest]distribution.Descriptor),
-    }
-}
-
-func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-    if err := dgst.Validate(); err != nil {
-        return distribution.Descriptor{}, err
-    }
-
-    mbdc.mu.RLock()
-    defer mbdc.mu.RUnlock()
-
-    desc, ok := mbdc.descriptors[dgst]
-    if !ok {
-        return distribution.Descriptor{}, distribution.ErrBlobUnknown
-    }
-
-    return desc, nil
-}
-
-func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
-    mbdc.mu.Lock()
-    defer mbdc.mu.Unlock()
-
-    delete(mbdc.descriptors, dgst)
-    return nil
-}
-
-func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
-    if err := dgst.Validate(); err != nil {
-        return err
-    }
-
-    if err := cache.ValidateDescriptor(desc); err != nil {
-        return err
-    }
-
-    mbdc.mu.Lock()
-    defer mbdc.mu.Unlock()
-
-    mbdc.descriptors[dgst] = desc
-    return nil
-}
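A sketch of the two-level (repository-scoped plus global) behaviour of the in-memory provider just removed, again assuming the upstream import paths and that `context.Background` is available from the distribution context package; the repository name and digest are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/storage/cache/memory"
)

func main() {
	ctx := context.Background()
	provider := memory.NewInMemoryBlobDescriptorCacheProvider()

	// Repository names must parse as named references.
	repoCache, err := provider.RepositoryScoped("library/ubuntu")
	if err != nil {
		panic(err)
	}

	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      0,
		Digest:    digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
	}

	// Writing through the scoped cache also populates the global map.
	if err := repoCache.SetDescriptor(ctx, desc.Digest, desc); err != nil {
		panic(err)
	}

	global, err := provider.Stat(ctx, desc.Digest)
	if err != nil {
		panic(err)
	}
	fmt.Println(global.Digest == desc.Digest) // true
}
```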
diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go
deleted file mode 100644
index d433ccaf5..000000000
--- a/vendor/github.com/docker/distribution/uuid/uuid.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Package uuid provides simple UUID generation. Only version 4 style UUIDs
-// can be generated.
-//
-// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
-package uuid
-
-import (
-    "crypto/rand"
-    "fmt"
-    "io"
-    "os"
-    "syscall"
-    "time"
-)
-
-const (
-    // Bits is the number of bits in a UUID
-    Bits = 128
-
-    // Size is the number of bytes in a UUID
-    Size = Bits / 8
-
-    format = "%08x-%04x-%04x-%04x-%012x"
-)
-
-var (
-    // ErrUUIDInvalid indicates a parsed string is not a valid uuid.
-    ErrUUIDInvalid = fmt.Errorf("invalid uuid")
-
-    // Loggerf can be used to override the default logging destination. Such
-    // log messages in this library should be logged at warning or higher.
-    Loggerf = func(format string, args ...interface{}) {}
-)
-
-// UUID represents a UUID value. UUIDs can be compared and set to other values
-// and accessed by byte.
-type UUID [Size]byte
-
-// Generate creates a new, version 4 uuid.
-func Generate() (u UUID) {
-    const (
-        // ensures we backoff for less than 450ms total. Use the following to
-        // select new value, in units of 10ms:
-        //   n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
-        maxretries = 9
-        backoff    = time.Millisecond * 10
-    )
-
-    var (
-        totalBackoff time.Duration
-        count        int
-        retries      int
-    )
-
-    for {
-        // This should never block but the read may fail. Because of this,
-        // we just try to read the random number generator until we get
-        // something. This is a very rare condition but may happen.
-        b := time.Duration(retries) * backoff
-        time.Sleep(b)
-        totalBackoff += b
-
-        n, err := io.ReadFull(rand.Reader, u[count:])
-        if err != nil {
-            if retryOnError(err) && retries < maxretries {
-                count += n
-                retries++
-                Loggerf("error generating version 4 uuid, retrying: %v", err)
-                continue
-            }
-
-            // Any other errors represent a system problem. What did someone
-            // do to /dev/urandom?
-            panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
-        }
-
-        break
-    }
-
-    u[6] = (u[6] & 0x0f) | 0x40 // set version byte
-    u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
-
-    return u
-}
-
-// Parse attempts to extract a uuid from the string or returns an error.
-func Parse(s string) (u UUID, err error) {
-    if len(s) != 36 {
-        return UUID{}, ErrUUIDInvalid
-    }
-
-    // create stack addresses for each section of the uuid.
-    p := make([][]byte, 5)
-
-    if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
-        return u, err
-    }
-
-    copy(u[0:4], p[0])
-    copy(u[4:6], p[1])
-    copy(u[6:8], p[2])
-    copy(u[8:10], p[3])
-    copy(u[10:16], p[4])
-
-    return
-}
-
-func (u UUID) String() string {
-    return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
-}
-
-// retryOnError tries to detect whether or not retrying would be fruitful.
-func retryOnError(err error) bool {
-    switch err := err.(type) {
-    case *os.PathError:
-        return retryOnError(err.Err) // unpack the target error
-    case syscall.Errno:
-        if err == syscall.EPERM {
-            // EPERM represents an entropy pool exhaustion, a condition under
-            // which we backoff and retry.
-            return true
-        }
-    }
-
-    return false
-}
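The removed uuid package round-trips cleanly through its own Parse. A minimal sketch, assuming the upstream import path:

```go
package main

import (
	"fmt"

	// Assumed upstream home of the vendored package deleted above.
	"github.com/docker/distribution/uuid"
)

func main() {
	u := uuid.Generate() // version 4, read from crypto/rand with retry/backoff
	fmt.Println(u)       // canonical 8-4-4-4-12 form via String()

	parsed, err := uuid.Parse(u.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == u) // true: UUID is a comparable [16]byte array
}
```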
diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go
deleted file mode 100644
index 056af6b84..000000000
--- a/vendor/github.com/docker/docker/api/types/auth.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package types
-
-// AuthConfig contains authorization information for connecting to a Registry
-type AuthConfig struct {
-    Username string `json:"username,omitempty"`
-    Password string `json:"password,omitempty"`
-    Auth     string `json:"auth,omitempty"`
-
-    // Email is an optional value associated with the username.
-    // This field is deprecated and will be removed in a later
-    // version of docker.
-    Email string `json:"email,omitempty"`
-
-    ServerAddress string `json:"serveraddress,omitempty"`
-
-    // IdentityToken is used to authenticate the user and get
-    // an access token for the registry.
-    IdentityToken string `json:"identitytoken,omitempty"`
-
-    // RegistryToken is a bearer token to be sent to a registry
-    RegistryToken string `json:"registrytoken,omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go
deleted file mode 100644
index abc0bba3f..000000000
--- a/vendor/github.com/docker/docker/api/types/backend/backend.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Package backend includes types to send information to server backends.
-package backend
-
-import (
-    "io"
-
-    "github.com/docker/docker/api/types"
-    "github.com/docker/docker/pkg/streamformatter"
-)
-
-// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
-type ContainerAttachConfig struct {
-    GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error)
-    UseStdin   bool
-    UseStdout  bool
-    UseStderr  bool
-    Logs       bool
-    Stream     bool
-    DetachKeys string
-
-    // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly.
-    // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change...
- // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. - // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. - MuxStreams bool -} - -// ContainerLogsConfig holds configs for logging operations. Exists -// for users of the backend to to pass it a logging configuration. -type ContainerLogsConfig struct { - types.ContainerLogsOptions - OutStream io.Writer -} - -// ContainerStatsConfig holds information for configuring the runtime -// behavior of a backend.ContainerStats() call. -type ContainerStatsConfig struct { - Stream bool - OutStream io.Writer - Version string -} - -// ExecInspect holds information about a running process started -// with docker exec. -type ExecInspect struct { - ID string - Running bool - ExitCode *int - ProcessConfig *ExecProcessConfig - OpenStdin bool - OpenStderr bool - OpenStdout bool - CanRemove bool - ContainerID string - DetachKeys []byte - Pid int -} - -// ExecProcessConfig holds information about the exec process -// running on the host. -type ExecProcessConfig struct { - Tty bool `json:"tty"` - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - Privileged *bool `json:"privileged,omitempty"` - User string `json:"user,omitempty"` -} - -// ContainerCommitConfig is a wrapper around -// types.ContainerCommitConfig that also -// transports configuration changes for a container. -type ContainerCommitConfig struct { - types.ContainerCommitConfig - Changes []string -} - -// ProgressWriter is an interface -// to transport progress streams. -type ProgressWriter struct { - Output io.Writer - StdoutFormatter *streamformatter.StdoutFormatter - StderrFormatter *streamformatter.StderrFormatter - ProgressReaderFunc func(io.ReadCloser) io.ReadCloser -} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index 931ae10ab..000000000 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index 7900d64f0..000000000 --- a/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,378 +0,0 @@ -package types - -import ( - "bufio" - "io" - "net" - "os" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type 
CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string - ContainerID string - Running bool - ExitCode int - Pid int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. 
-type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - // See the parsing of buildArgs in api/server/router/build/build_routes.go - // for an explaination of why BuildArgs needs to use *string instead of - // just a string - BuildArgs map[string]*string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string -} - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -//ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. 
-// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - -// VersionResponse holds version information for the client and the server -type VersionResponse struct { - Client *Version - Server *Version -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. -func (v VersionResponse) ServerOK() bool { - return v.Server != nil -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. 
-type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) - Args []string -} - -// SecretRequestOption is a type for requesting secrets -type SecretRequestOption struct { - Source string - Target string - UID string - GID string - Mode os.FileMode -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. -type PluginCreateOptions struct { - RepoName string -} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 20c19f213..000000000 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,69 +0,0 @@ -package types - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. -type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. 
-type PluginDisableConfig struct { - ForceDisable bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index fc050e5db..000000000 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,62 +0,0 @@ -package container - -import ( - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index d028e3b12..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/swagger-gen.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody container create created body -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index 81ee12c67..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,17 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/swagger-gen.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody container update o k body -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 16cf33532..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,17 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/swagger-gen.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBody container wait o k body -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go 
b/vendor/github.com/docker/docker/api/types/container/host_config.go deleted file mode 100644 index 0c82d625e..000000000 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ /dev/null @@ -1,333 +0,0 @@ -package container - -import ( - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - "github.com/docker/go-units" -) - -// NetworkMode represents the container network stack. -type NetworkMode string - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its private ipc stack. -func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's ipc stack. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's ipc stack. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the ipc stack is valid. -func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. 
-func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. 
other containers) - Memory int64 // Memory limit (in bytes) - NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. 
-type HostConfig struct {
-    // Applicable to all platforms
-    Binds           []string      // List of volume bindings for this container
-    ContainerIDFile string        // File (path) where the containerId is written
-    LogConfig       LogConfig     // Configuration of the logs for this container
-    NetworkMode     NetworkMode   // Network mode to use for the container
-    PortBindings    nat.PortMap   // Port mapping between the exposed port (container) and the host
-    RestartPolicy   RestartPolicy // Restart policy to be used for the container
-    AutoRemove      bool          // Automatically remove container when it exits
-    VolumeDriver    string        // Name of the volume driver used to mount volumes
-    VolumesFrom     []string      // List of volumes to take from other containers
-
-    // Applicable to UNIX platforms
-    CapAdd          strslice.StrSlice // List of kernel capabilities to add to the container
-    CapDrop         strslice.StrSlice // List of kernel capabilities to remove from the container
-    DNS             []string          `json:"Dns"`        // List of DNS servers to look up
-    DNSOptions      []string          `json:"DnsOptions"` // List of DNS options to look for
-    DNSSearch       []string          `json:"DnsSearch"`  // List of DNS search domains to look for
-    ExtraHosts      []string          // List of extra hosts
-    GroupAdd        []string          // List of additional groups that the container process will run as
-    IpcMode         IpcMode           // IPC namespace to use for the container
-    Cgroup          CgroupSpec        // Cgroup to use for the container
-    Links           []string          // List of links (in the name:alias form)
-    OomScoreAdj     int               // Container preference for OOM-killing
-    PidMode         PidMode           // PID namespace to use for the container
-    Privileged      bool              // Is the container in privileged mode
-    PublishAllPorts bool              // Should docker publish all exposed ports for the container
-    ReadonlyRootfs  bool              // Is the container root filesystem read-only
-    SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
-    StorageOpt      map[string]string `json:",omitempty"` // Storage driver options per container.
-    Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
-    UTSMode         UTSMode           // UTS namespace to use for the container
-    UsernsMode      UsernsMode        // The user namespace to use for the container
-    ShmSize         int64             // Total shm memory usage
-    Sysctls         map[string]string `json:",omitempty"` // List of namespaced sysctls used for the container
-    Runtime         string            `json:",omitempty"` // Runtime to use with this container
-
-    // Applicable to Windows
-    ConsoleSize [2]uint   // Initial console size (height,width)
-    Isolation   Isolation // Isolation technology of the container (eg default, hyperv)
-
-    // Contains container's resources (cgroups, ulimits)
-    Resources
-
-    // Mounts specs used by the container
-    Mounts []mount.Mount `json:",omitempty"`
-
-    // Run a custom init inside the container, if null, use the daemon's configured settings
-    Init *bool `json:",omitempty"`
-
-    // Custom init path
-    InitPath string `json:",omitempty"`
-}
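Before the platform-specific NetworkMode helpers below, a sketch of how these types fit together when building a HostConfig, assuming they are used from their upstream home at `github.com/docker/docker/api/types/container`; the values are illustrative:

```go
package main

import (
	"fmt"

	// Assumed upstream home of the vendored package deleted above.
	"github.com/docker/docker/api/types/container"
)

func main() {
	hc := container.HostConfig{
		NetworkMode: "bridge", // interpreted by the platform helpers below
		RestartPolicy: container.RestartPolicy{
			Name:              "on-failure",
			MaximumRetryCount: 3,
		},
		Resources: container.Resources{
			Memory:    512 * 1024 * 1024, // bytes
			CPUShares: 512,               // relative weight
		},
	}

	fmt.Println(hc.RestartPolicy.IsOnFailure()) // true
	fmt.Println(hc.NetworkMode.IsBridge())      // true on Linux; on Windows the bridge mode is named "nat"
}
```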
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
deleted file mode 100644
index 9fb79bed6..000000000
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// +build !windows
-
-package container
-
-import "strings"
-
-// IsValid indicates if an isolation technology is valid
-func (i Isolation) IsValid() bool {
-    return i.IsDefault()
-}
-
-// IsPrivate indicates whether container uses its private network stack.
-func (n NetworkMode) IsPrivate() bool {
-    return !(n.IsHost() || n.IsContainer())
-}
-
-// IsDefault indicates whether container uses the default network stack.
-func (n NetworkMode) IsDefault() bool {
-    return n == "default"
-}
-
-// NetworkName returns the name of the network stack.
-func (n NetworkMode) NetworkName() string {
-    if n.IsBridge() {
-        return "bridge"
-    } else if n.IsHost() {
-        return "host"
-    } else if n.IsContainer() {
-        return "container"
-    } else if n.IsNone() {
-        return "none"
-    } else if n.IsDefault() {
-        return "default"
-    } else if n.IsUserDefined() {
-        return n.UserDefined()
-    }
-    return ""
-}
-
-// IsBridge indicates whether container uses the bridge network stack
-func (n NetworkMode) IsBridge() bool {
-    return n == "bridge"
-}
-
-// IsHost indicates whether container uses the host network stack.
-func (n NetworkMode) IsHost() bool {
-    return n == "host"
-}
-
-// IsContainer indicates whether container uses a container network stack.
-func (n NetworkMode) IsContainer() bool {
-    parts := strings.SplitN(string(n), ":", 2)
-    return len(parts) > 1 && parts[0] == "container"
-}
-
-// IsNone indicates whether container isn't using a network stack.
-func (n NetworkMode) IsNone() bool {
-    return n == "none"
-}
-
-// ConnectedContainer is the id of the container whose network this container is connected to.
-func (n NetworkMode) ConnectedContainer() string {
-    parts := strings.SplitN(string(n), ":", 2)
-    if len(parts) > 1 {
-        return parts[1]
-    }
-    return ""
-}
-
-// IsUserDefined indicates user-created network
-func (n NetworkMode) IsUserDefined() bool {
-    return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
-}
-
-// UserDefined returns the name of a user-created network.
-func (n NetworkMode) UserDefined() string {
-    if n.IsUserDefined() {
-        return string(n)
-    }
-    return ""
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
deleted file mode 100644
index 0ee332ba6..000000000
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package container
-
-import (
-    "strings"
-)
-
-// IsDefault indicates whether container uses the default network stack.
-func (n NetworkMode) IsDefault() bool {
-    return n == "default"
-}
-
-// IsNone indicates whether container isn't using a network stack.
-func (n NetworkMode) IsNone() bool {
-    return n == "none"
-}
-
-// IsContainer indicates whether container uses a container network stack.
-// Returns false as windows doesn't support this mode
-func (n NetworkMode) IsContainer() bool {
-    return false
-}
-
-// IsBridge indicates whether container uses the bridge network stack
-// in windows it is given the name NAT
-func (n NetworkMode) IsBridge() bool {
-    return n == "nat"
-}
-
-// IsHost indicates whether container uses the host network stack.
-// returns false as this is not supported by windows
-func (n NetworkMode) IsHost() bool {
-    return false
-}
-
-// IsPrivate indicates whether container uses its private network stack.
-func (n NetworkMode) IsPrivate() bool {
-    return !(n.IsHost() || n.IsContainer())
-}
-
-// ConnectedContainer is the id of the container whose network this container is connected to.
-// Returns blank string on windows -func (n NetworkMode) ConnectedContainer() string { - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9..000000000 --- a/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. - // Required: true - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index e01a41deb..000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,310 +0,0 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. -package filters - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} -type Args struct { - fields map[string]map[string]bool -} - -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} -} - -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. 
-func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned in case of bad format for a filter. -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam packs the Args into a string for easy transport from client to server. -func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil -} - -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). -func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") - if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err - } - return string(buf), nil -} - -// FromParam unpacks the filter Args. -func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil - } - - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } - } - return Args{m}, nil -} - -// Get returns the list of values associates with a field. -// It returns a slice of strings to keep backwards compatibility with old code. -func (filters Args) Get(field string) []string { - values := filters.fields[field] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true - } else { - filters.fields[name] = map[string]bool{value: true} - } -} - -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - if len(filters.fields[name]) == 0 { - delete(filters.fields, name) - } - } -} - -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) -} - -// MatchKVList returns true if the values for the specified field matches the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. 
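The deleted parse.go implements the filter flow used by commands such as docker ps; a rough sketch of that flow, assuming the vendored filters package (the flag values are made up).

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Accumulate repeated -f name=value flags into one Args set.
	args := filters.NewArgs()
	var err error
	for _, flag := range []string{"label=env=prod", "name=web"} {
		if args, err = filters.ParseFlag(flag, args); err != nil {
			panic(err)
		}
	}

	// Pack for transport to the daemon and unpack again.
	param, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(param) // {"label":{"env=prod":true},"name":{"web":true}}

	back, err := filters.FromParam(param)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Get("label")) // [env=prod]
}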
-func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if the values for the specified field matches the source string -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. -func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. -func (filters Args) UniqueExactMatch(field, source string) bool { - fieldValues := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(filters.fields[field]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] - return ok -} - -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. -func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { - if !accepted[name] { - return fmt.Errorf("Invalid filter '%s'", name) - } - } - return nil -} - -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. 
-func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { - return nil - } - for v := range filters.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b..000000000 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index e145b3dcf..000000000 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // containers - // Required: true - Containers int64 `json:"Containers"` - - // created - // Required: true - Created int64 `json:"Created"` - - // Id - // Required: true - ID string `json:"Id"` - - // labels - // Required: true - Labels map[string]string `json:"Labels"` - - // parent Id - // Required: true - ParentID string `json:"ParentId"` - - // repo digests - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // repo tags - // Required: true - RepoTags []string `json:"RepoTags"` - - // shared size - // Required: true - SharedSize int64 `json:"SharedSize"` - - // size - // Required: true - Size int64 `json:"Size"` - - // virtual size - // Required: true - VirtualSize int64 `json:"VirtualSize"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index 8ac89402f..000000000 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,103 +0,0 @@ -package mount - -import ( - "os" -) - -// Type represents the type of a mount. -type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. 
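Once decoded, Args drives the matching helpers above; an illustrative sketch under the same vendoring assumptions.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("name", "web")
	args.Add("label", "env=prod")

	fmt.Println(args.ExactMatch("name", "web"))    // true
	fmt.Println(args.FuzzyMatch("name", "webapp")) // true: "web" is a prefix
	fmt.Println(args.Match("name", "website"))     // true: value is used as a regexp

	// MatchKVList checks key=value filters against a label set.
	labels := map[string]string{"env": "prod", "tier": "frontend"}
	fmt.Println(args.MatchKVList("label", labels)) // true
}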
- // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. -type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. - SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From docker/docker/pkg/mount/flags.go: - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system.
-} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 832b3edb9..000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,59 +0,0 @@ -package network - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// PeerInfo represents one peer of a overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index 46f47be26..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,186 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True when the plugin is running. False when the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. 
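A sketch of constructing the three Mount variants defined above (bind, volume, tmpfs), assuming the vendored mount package; paths, volume names and labels are hypothetical.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	mounts := []mount.Mount{
		{
			Type:        mount.TypeBind,
			Source:      "/srv/config", // host path for bind mounts
			Target:      "/etc/app",
			ReadOnly:    true,
			BindOptions: &mount.BindOptions{Propagation: mount.PropagationRPrivate},
		},
		{
			Type:   mount.TypeVolume,
			Source: "app-data", // volume name for volume mounts
			Target: "/var/lib/app",
			VolumeOptions: &mount.VolumeOptions{
				Labels:       map[string]string{"backup": "daily"},
				DriverConfig: &mount.Driver{Name: "local"},
			},
		},
		{
			Type:         mount.TypeTmpfs, // Source must stay empty for tmpfs
			Target:       "/tmp/scratch",
			TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 1 << 20, Mode: 0700},
		},
	}
	for _, m := range mounts {
		fmt.Printf("%s -> %s (%s)\n", m.Source, m.Target, m.Type)
	}
}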
-// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. -// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 569901067..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2e..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e8..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index d6f755311..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,64 +0,0 @@ -package types - -import ( - "encoding/json" - "fmt" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index ad52d46d5..000000000 --- a/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
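The UnmarshalJSON above parses the compact "prefix.capability/version" wire form; a round-trip sketch, assuming the vendored types package.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	var t types.PluginInterfaceType
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0
	fmt.Println(t.String())                        // docker.volumedriver/1.0
}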
-// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // IP - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index 5e37d19bd..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/swagger-gen.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 28fafab90..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,104 +0,0 @@ -package registry - -import ( - "encoding/json" - "net" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go deleted file mode 100644 index 4f02ef36b..000000000 --- a/vendor/github.com/docker/docker/api/types/seccomp.go +++ /dev/null @@ -1,93 +0,0 @@ -package types - -// Seccomp represents the config for a seccomp profile for syscall restriction. 
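NetIPNet exists so CIDR ranges can round-trip through JSON as plain strings; a sketch under the same vendoring assumptions.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	fmt.Println(n.String()) // 10.0.0.0/8

	out, err := json.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "10.0.0.0/8"
}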
-type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - // Architectures is kept to maintain backward compatibility with the old - // seccomp profile. - Architectures []Arch `json:"architectures,omitempty"` - ArchMap []Architecture `json:"archMap,omitempty"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Architecture is used to represent a specific architecture -// and its sub-architectures -type Architecture struct { - Arch Arch `json:"architecture"` - SubArches []Arch `json:"subArchitectures"` -} - -// Arch used for architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" - ArchPPC Arch = "SCMP_ARCH_PPC" - ArchPPC64 Arch = "SCMP_ARCH_PPC64" - ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" - ArchS390 Arch = "SCMP_ARCH_S390" - ArchS390X Arch = "SCMP_ARCH_S390X" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual Operator = "SCMP_CMP_GE" - OpGreaterThan Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Filter is used to conditionally apply Seccomp rules -type Filter struct { - Caps []string `json:"caps,omitempty"` - Arches []string `json:"arches,omitempty"` -} - -// Syscall is used to match a group of syscalls in Seccomp -type Syscall struct { - Name string `json:"name,omitempty"` - Names []string `json:"names,omitempty"` - Action Action `json:"action"` - Args []*Arg `json:"args"` - Comment string `json:"comment"` - Includes Filter `json:"includes"` - Excludes Filter `json:"excludes"` -} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1b..000000000 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool.
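A sketch of a deny-by-default profile built from the seccomp types above; the syscall whitelist is hypothetical and deliberately tiny.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/docker/docker/api/types"
)

func main() {
	profile := types.Seccomp{
		DefaultAction: types.ActErrno, // deny everything not explicitly allowed
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX32},
		Syscalls: []*types.Syscall{
			{Names: []string{"read", "write", "exit_group"}, Action: types.ActAllow},
		},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(profile); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}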
-// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 9bf1928b8..000000000 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,178 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). - // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. 
- Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores all IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. - RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. - RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit.
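Consumers typically turn two successive CPU samples into a usage percentage by comparing the container delta with the system delta and scaling by core count; a sketch with made-up sample values, assuming the vendored types package.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// cpuPercent derives a Linux CPU usage percentage from two successive
// samples: the container's share of the system-wide CPU delta, scaled
// by the number of cores reported in PercpuUsage.
func cpuPercent(pre, cur types.CPUStats) float64 {
	cpuDelta := float64(cur.CPUUsage.TotalUsage) - float64(pre.CPUUsage.TotalUsage)
	sysDelta := float64(cur.SystemUsage) - float64(pre.SystemUsage)
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * float64(len(cur.CPUUsage.PercpuUsage)) * 100
}

func main() {
	pre := types.CPUStats{CPUUsage: types.CPUUsage{TotalUsage: 1e9, PercpuUsage: make([]uint64, 4)}, SystemUsage: 10e9}
	cur := types.CPUStats{CPUUsage: types.CPUUsage{TotalUsage: 2e9, PercpuUsage: make([]uint64, 4)}, SystemUsage: 20e9}
	fmt.Printf("%.1f%%\n", cpuPercent(pre, cur)) // 40.0%
}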
- Limit uint64 `json:"limit,omitempty"` -} - -// Stats is the ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON extends Stats with the container's name and ID, plus per-network stats -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index bad493fb8..000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index 64a648bad..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm - -import "time" - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is a base object inherited by most of the other ones. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` -} - -// Driver represents a driver (network, logging).
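StrSlice's custom decoder accepts both JSON shapes, the shapes used by fields such as Entrypoint and Cmd; a quick sketch, assuming the vendored strslice package.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	// StrSlice accepts either JSON form; both decode to []string.
	var single, many strslice.StrSlice
	if err := json.Unmarshal([]byte(`"nginx -g 'daemon off;'"`), &single); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`["nginx", "-g", "daemon off;"]`), &many); err != nil {
		panic(err)
	}

	fmt.Printf("%q\n", single) // ["nginx -g 'daemon off;'"]
	fmt.Printf("%q\n", many)   // ["nginx" "-g" "daemon off;"]
}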
-type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index 4ab476ccc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,46 +0,0 @@ -package swarm - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] - Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 5a5e11bdb..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,111 +0,0 @@ -package swarm - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. 
-type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. -type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index 379e17a77..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,114 +0,0 @@ -package swarm - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. 
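A sketch of publishing a port through the endpoint types above, assuming the vendored swarm package; the port numbers are hypothetical.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// Publish container port 80 as 8080 on the routing mesh.
	spec := swarm.EndpointSpec{
		Mode: swarm.ResolutionModeVIP,
		Ports: []swarm.PortConfig{{
			Name:          "http",
			Protocol:      swarm.PortConfigProtocolTCP,
			TargetPort:    80,
			PublishedPort: 8080,
			PublishMode:   swarm.PortConfigPublishModeIngress,
		}},
	}
	fmt.Printf("%+v\n", spec)
}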
- ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. -type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index fdb238888..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,31 +0,0 @@ -package swarm - -import "os" - -// Secret represents a secret. 
-type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index 2cf2642c1..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,105 +0,0 @@ -package swarm - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus UpdateStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt time.Time `json:",omitempty"` - CompletedAt time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update fails. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure.
If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index 0b4221969..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,197 +0,0 @@ -package swarm - -import "time" - -// ClusterInfo represents info about the cluster for outputing in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. 
- KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - ForceNewCluster bool - Spec Spec - AutoLockManagers bool -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - RemoteAddrs []string - JoinToken string // accept by secret -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int - Managers int - - Cluster ClusterInfo -} - -// Peer represents a peer. 
-type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index ace12cc89..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,128 +0,0 @@ -package swarm - -import "time" - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 -} - -// Resources represents resources (CPU/Memory). -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Resources `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. 
-type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string `json:",omitempty"` - PID int `json:",omitempty"` - ExitCode int `json:",omitempty"` -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index a82c3e88e..000000000 --- a/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,549 +0,0 @@ -package types - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -// ContainerChange contains response of Engine API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Engine API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Engine API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - -// RootFS returns Image's RootFS description including the layer IDs. 
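Tying the service, task and restart-policy types together: a replicated service spec would be assembled roughly as below. This is a sketch only; the client call that submits it is assumed.

package example

import (
	"time"

	"github.com/docker/docker/api/types/swarm"
)

// replicatedSpec builds a spec for n replicas that restart on failure and
// whose rolling updates pause after the first failed task.
func replicatedSpec(n uint64) swarm.ServiceSpec {
	delay := 5 * time.Second
	return swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			RestartPolicy: &swarm.RestartPolicy{
				Condition: swarm.RestartPolicyConditionOnFailure,
				Delay:     &delay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &n},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   1,
			FailureAction: swarm.UpdateFailureActionPause,
		},
	}
}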
-type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - OsVersion string `json:",omitempty"` - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - -// ContainerProcessList contains response of Engine API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - Experimental bool -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Commit records a external tool actual commit id version along the -// one expect by dockerd as set at build time -type Commit struct { - ID string - Expected string -} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string 
`json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - split := strings.Split(opt, ",") - for _, s := range split { - kv := strings.SplitN(s, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("invalid security option %q", s) - } - if kv[0] == "" || kv[1] == "" { - return nil, errors.New("invalid empty security option") - } - if kv[0] == "name" { - secopt.Name = kv[1] - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. 
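The Health data above is what inspect exposes; reading it defensively looks roughly like this (a sketch; the *types.ContainerState value would come from an inspect response):

package example

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// reportHealth prints the newest probe output when a container is unhealthy.
func reportHealth(s *types.ContainerState) {
	h := s.Health
	if h == nil || h.Status != types.Unhealthy || len(h.Log) == 0 {
		return
	}
	last := h.Log[len(h.Log)-1] // Log is oldest-first, so the last entry is newest
	fmt.Printf("unhealthy after %d consecutive failures: %s\n", h.FailingStreak, last.Output)
}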
It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. 
-type MountPoint struct { - Type mount.Type `json:",omitempty"` - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation mount.Propagation -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - CheckDuplicate bool - Driver string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. 
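A create-network body composed from the types above; the driver and label values here are purely illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	create := types.NetworkCreate{
		CheckDuplicate: true, // fail instead of silently creating a duplicate name
		Driver:         "bridge",
		Labels:         map[string]string{"com.example.team": "infra"}, // illustrative label
	}
	buf, err := json.Marshal(create)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
}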
-type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume -} - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []ImageDelete - SpaceReclaimed uint64 -} - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go deleted file mode 100644 index 611d4fed6..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. 
-func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go deleted file mode 100644 index da4f8ebd9..000000000 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ /dev/null @@ -1,58 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. - // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *VolumeUsageData `json:"UsageData,omitempty"` -} - -// VolumeUsageData volume usage data -// swagger:model VolumeUsageData -type VolumeUsageData struct { - - // The number of containers referencing this volume. - // Required: true - RefCount int64 `json:"RefCount"` - - // The disk space used by the volume (local driver only) - // Required: true - Size int64 `json:"Size"` -} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go deleted file mode 100644 index ced19e81e..000000000 --- a/vendor/github.com/docker/docker/builder/builder.go +++ /dev/null @@ -1,169 +0,0 @@ -// Package builder defines interfaces for any Docker builder to implement. -// -// Historically, only server-side Dockerfile interpreters existed. 
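The versions helpers removed here compare dot-separated segments numerically, not lexically, which matters once a component reaches two digits. Typical use is gating behaviour on a negotiated API version:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	apiVersion := "1.25" // e.g. taken from a Ping response
	if versions.GreaterThanOrEqualTo(apiVersion, "1.24") {
		fmt.Println("swarm-mode endpoints are available")
	}
	// true: 9 < 10 per segment, even though "1.9" > "1.10" as strings
	fmt.Println(versions.LessThan("1.9", "1.10"))
}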
-// This package allows for other implementations of Docker builders. -package builder - -import ( - "io" - "os" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/image" - "github.com/docker/docker/reference" - "golang.org/x/net/context" -) - -const ( - // DefaultDockerfileName is the Default filename with Docker commands, read by docker build - DefaultDockerfileName string = "Dockerfile" -) - -// Context represents a file system tree. -type Context interface { - // Close allows to signal that the filesystem tree won't be used anymore. - // For Context implementations using a temporary directory, it is recommended to - // delete the temporary directory in Close(). - Close() error - // Stat returns an entry corresponding to path if any. - // It is recommended to return an error if path was not found. - // If path is a symlink it also returns the path to the target file. - Stat(path string) (string, FileInfo, error) - // Open opens path from the context and returns a readable stream of it. - Open(path string) (io.ReadCloser, error) - // Walk walks the tree of the context with the function passed to it. - Walk(root string, walkFn WalkFunc) error -} - -// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). -type WalkFunc func(path string, fi FileInfo, err error) error - -// ModifiableContext represents a modifiable Context. -// TODO: remove this interface once we can get rid of Remove() -type ModifiableContext interface { - Context - // Remove deletes the entry specified by `path`. - // It is usual for directory entries to delete all its subentries. - Remove(path string) error -} - -// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. -// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. -type FileInfo interface { - os.FileInfo - Path() string -} - -// PathFileInfo is a convenience struct that implements the FileInfo interface. -type PathFileInfo struct { - os.FileInfo - // FilePath holds the absolute path to the file. - FilePath string - // Name holds the basename for the file. - FileName string -} - -// Path returns the absolute path to the file. -func (fi PathFileInfo) Path() string { - return fi.FilePath -} - -// Name returns the basename of the file. -func (fi PathFileInfo) Name() string { - if fi.FileName != "" { - return fi.FileName - } - return fi.FileInfo.Name() -} - -// Hashed defines an extra method intended for implementations of os.FileInfo. -type Hashed interface { - // Hash returns the hash of a file. - Hash() string - SetHash(string) -} - -// HashedFileInfo is a convenient struct that augments FileInfo with a field. -type HashedFileInfo struct { - FileInfo - // FileHash represents the hash of a file. - FileHash string -} - -// Hash returns the hash of a file. -func (fi HashedFileInfo) Hash() string { - return fi.FileHash -} - -// SetHash sets the hash of a file. -func (fi *HashedFileInfo) SetHash(h string) { - fi.FileHash = h -} - -// Backend abstracts calls to a Docker Daemon. -type Backend interface { - // TODO: use digest reference instead of name - - // GetImageOnBuild looks up a Docker image referenced by `name`. - GetImageOnBuild(name string) (Image, error) - // TagImage tags an image with newTag - TagImageWithReference(image.ID, reference.Named) error - // PullOnBuild tells Docker to pull image referenced by `name`. 
- PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) - // ContainerAttachRaw attaches to container. - ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error - // ContainerCreate creates a new Docker container and returns potential warnings - ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) - // ContainerRm removes a container specified by `id`. - ContainerRm(name string, config *types.ContainerRmConfig) error - // Commit creates a new Docker image from an existing Docker container. - Commit(string, *backend.ContainerCommitConfig) (string, error) - // ContainerKill stops the container execution abruptly. - ContainerKill(containerID string, sig uint64) error - // ContainerStart starts a new container - ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error - // ContainerWait stops processing until the given container is stopped. - ContainerWait(containerID string, timeout time.Duration) (int, error) - // ContainerUpdateCmdOnBuild updates container.Path and container.Args - ContainerUpdateCmdOnBuild(containerID string, cmd []string) error - // ContainerCreateWorkdir creates the workdir (currently only used on Windows) - ContainerCreateWorkdir(containerID string) error - - // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container - // specified by a container object. - // TODO: make an Extract method instead of passing `decompress` - // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used - // with Context.Walk - // ContainerCopy(name string, res string) (io.ReadCloser, error) - // TODO: use copyBackend api - CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error - - // HasExperimental checks if the backend supports experimental features - HasExperimental() bool - - // SquashImage squashes the fs layers from the provided image down to the specified `to` image - SquashImage(from string, to string) (string, error) -} - -// Image represents a Docker image used by the builder. -type Image interface { - ImageID() string - RunConfig() *container.Config -} - -// ImageCacheBuilder represents a generator for stateful image cache. -type ImageCacheBuilder interface { - // MakeImageCache creates a stateful image cache. - MakeImageCache(cacheFrom []string) ImageCache -} - -// ImageCache abstracts an image cache. -// (parent image, child runconfig) -> child image -type ImageCache interface { - // GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` - // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. 
- GetCache(parentID string, cfg *container.Config) (imageID string, err error) -} diff --git a/vendor/github.com/docker/docker/builder/context.go b/vendor/github.com/docker/docker/builder/context.go deleted file mode 100644 index 600f42319..000000000 --- a/vendor/github.com/docker/docker/builder/context.go +++ /dev/null @@ -1,260 +0,0 @@ -package builder - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/gitutils" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" -) - -// ValidateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read -// symlinks which point to non-existing files don't trigger an error -func ValidateContextDirectory(srcPath string, excludes []string) error { - contextRoot, err := getContextRoot(srcPath) - if err != nil { - return err - } - return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hanging on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -// GetContextFromReader will read the contents of the given reader as either a -// Dockerfile or tar archive. Returns a tar archive used as a context and a -// path to the Dockerfile inside the tar. -func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { - buf := bufio.NewReader(r) - - magic, err := buf.Peek(archive.HeaderSize) - if err != nil && err != io.EOF { - return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) - } - - if archive.IsArchive(magic) { - return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil - } - - // Input should be read as a Dockerfile. 
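Since ImageCache is a single-method interface, a no-op implementation that always misses is enough to disable caching; per the contract above, a miss is an empty ID with a nil error:

package nop

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder"
)

// nopCache never returns a cache hit.
type nopCache struct{}

func (nopCache) GetCache(parentID string, cfg *container.Config) (string, error) {
	return "", nil
}

var _ builder.ImageCache = nopCache{} // compile-time interface check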
- tmpDir, err := ioutil.TempDir("", "docker-build-context-") - if err != nil { - return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err) - } - - f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) - if err != nil { - return nil, "", err - } - _, err = io.Copy(f, buf) - if err != nil { - f.Close() - return nil, "", err - } - - if err := f.Close(); err != nil { - return nil, "", err - } - if err := r.Close(); err != nil { - return nil, "", err - } - - tar, err := archive.Tar(tmpDir, archive.Uncompressed) - if err != nil { - return nil, "", err - } - - return ioutils.NewReadCloserWrapper(tar, func() error { - err := tar.Close() - os.RemoveAll(tmpDir) - return err - }), DefaultDockerfileName, nil - -} - -// GetContextFromGitURL uses a Git URL as context for a `docker build`. The -// git repo is cloned into a temporary directory used as the context directory. -// Returns the absolute path to the temporary context directory, the relative -// path of the dockerfile in that context directory, and a non-nil error on -// success. -func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { - if _, err := exec.LookPath("git"); err != nil { - return "", "", fmt.Errorf("unable to find 'git': %v", err) - } - if absContextDir, err = gitutils.Clone(gitURL); err != nil { - return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) - } - - return getDockerfileRelPath(absContextDir, dockerfileName) -} - -// GetContextFromURL uses a remote URL as context for a `docker build`. The -// remote resource is downloaded as either a Dockerfile or a tar archive. -// Returns the tar archive used for the context and a path of the -// dockerfile inside the tar. -func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { - response, err := httputils.Download(remoteURL) - if err != nil { - return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) - } - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) - - // Pass the response body through a progress reader. - progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) - - return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) -} - -// GetContextFromLocalDir uses the given local directory as context for a -// `docker build`. Returns the absolute path to the local context directory, -// the relative path of the dockerfile in that context directory, and a non-nil -// error on success. -func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { - // When using a local context directory, when the Dockerfile is specified - // with the `-f/--file` option then it is considered relative to the - // current directory and not the context directory. 
- if dockerfileName != "" { - if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { - return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) - } - } - - return getDockerfileRelPath(localDir, dockerfileName) -} - -// getDockerfileRelPath uses the given context directory for a `docker build` -// and returns the absolute path to the context directory, the relative path of -// the dockerfile in that context directory, and a non-nil error on success. -func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { - if absContextDir, err = filepath.Abs(givenContextDir); err != nil { - return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) - } - - // The context dir might be a symbolic link, so follow it to the actual - // target directory. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to workaround - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. - if !isUNC(absContextDir) { - absContextDir, err = filepath.EvalSymlinks(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) - } - } - - stat, err := os.Lstat(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) - } - - if !stat.IsDir() { - return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) - } - - absDockerfile := givenDockerfile - if absDockerfile == "" { - // No -f/--file was specified so use the default relative to the - // context directory. - absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) - - // Just to be nice ;-) look for 'dockerfile' too but only - // use it if we found it, otherwise ignore this check - if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { - altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) - if _, err = os.Lstat(altPath); err == nil { - absDockerfile = altPath - } - } - } - - // If not already an absolute path, the Dockerfile path should be joined to - // the base directory. - if !filepath.IsAbs(absDockerfile) { - absDockerfile = filepath.Join(absContextDir, absDockerfile) - } - - // Evaluate symlinks in the path to the Dockerfile too. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to workaround - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. 
- if !isUNC(absDockerfile) { - absDockerfile, err = filepath.EvalSymlinks(absDockerfile) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) - } - } - - if _, err := os.Lstat(absDockerfile); err != nil { - if os.IsNotExist(err) { - return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) - } - return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) - } - - if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { - return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) - } - - if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { - return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) - } - - return absContextDir, relDockerfile, nil -} - -// isUNC returns true if the path is UNC (one starting \\). It always returns -// false on Linux. -func isUNC(path string) bool { - return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) -} diff --git a/vendor/github.com/docker/docker/builder/context_unix.go b/vendor/github.com/docker/docker/builder/context_unix.go deleted file mode 100644 index d1f72e057..000000000 --- a/vendor/github.com/docker/docker/builder/context_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package builder - -import ( - "path/filepath" -) - -func getContextRoot(srcPath string) (string, error) { - return filepath.Join(srcPath, "."), nil -} diff --git a/vendor/github.com/docker/docker/builder/context_windows.go b/vendor/github.com/docker/docker/builder/context_windows.go deleted file mode 100644 index b8ba2ba23..000000000 --- a/vendor/github.com/docker/docker/builder/context_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows - -package builder - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/longpath" -) - -func getContextRoot(srcPath string) (string, error) { - cr, err := filepath.Abs(srcPath) - if err != nil { - return "", err - } - return longpath.AddPrefix(cr), nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore.go deleted file mode 100644 index 3da791336..000000000 --- a/vendor/github.com/docker/docker/builder/dockerignore.go +++ /dev/null @@ -1,48 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/pkg/fileutils" -) - -// DockerIgnoreContext wraps a ModifiableContext to add a method -// for handling the .dockerignore file at the root of the context. -type DockerIgnoreContext struct { - ModifiableContext -} - -// Process reads the .dockerignore file at the root of the embedded context. -// If .dockerignore does not exist in the context, then nil is returned. -// -// It can take a list of files to be removed after .dockerignore is removed. -// This is used for server-side implementations of builders that need to send -// the .dockerignore file as well as the special files specified in filesToRemove, -// but expect them to be excluded from the context after they were processed. -// -// For example, server-side Dockerfile builders are expected to pass in the name -// of the Dockerfile to be removed after it was parsed. -// -// TODO: Don't require a ModifiableContext (use Context instead) and don't remove -// files, instead handle a list of files to be excluded from the context. 
-func (c DockerIgnoreContext) Process(filesToRemove []string) error { - f, err := c.Open(".dockerignore") - // Note that a missing .dockerignore file isn't treated as an error - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - excludes, _ := dockerignore.ReadAll(f) - f.Close() - filesToRemove = append([]string{".dockerignore"}, filesToRemove...) - for _, fileToRemove := range filesToRemove { - rm, _ := fileutils.Matches(fileToRemove, excludes) - if rm { - c.Remove(fileToRemove) - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go deleted file mode 100644 index 2db67be79..000000000 --- a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go +++ /dev/null @@ -1,49 +0,0 @@ -package dockerignore - -import ( - "bufio" - "bytes" - "fmt" - "io" - "path/filepath" - "strings" -) - -// ReadAll reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use GO's "clean" func to get the shortest/cleanest path for each. -func ReadAll(reader io.Reader) ([]string, error) { - if reader == nil { - return nil, nil - } - - scanner := bufio.NewScanner(reader) - var excludes []string - currentLine := 0 - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - pattern := string(scannedBytes) - currentLine++ - // Lines starting with # (comments) are ignored before processing - if strings.HasPrefix(pattern, "#") { - continue - } - pattern = strings.TrimSpace(pattern) - if pattern == "" { - continue - } - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) - excludes = append(excludes, pattern) - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading .dockerignore: %v", err) - } - return excludes, nil -} diff --git a/vendor/github.com/docker/docker/builder/git.go b/vendor/github.com/docker/docker/builder/git.go deleted file mode 100644 index 74df24461..000000000 --- a/vendor/github.com/docker/docker/builder/git.go +++ /dev/null @@ -1,28 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/gitutils" -) - -// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. -func MakeGitContext(gitURL string) (ModifiableContext, error) { - root, err := gitutils.Clone(gitURL) - if err != nil { - return nil, err - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return nil, err - } - - defer func() { - // TODO: print errors? 
- c.Close() - os.RemoveAll(root) - }() - return MakeTarSumContext(c) -} diff --git a/vendor/github.com/docker/docker/builder/remote.go b/vendor/github.com/docker/docker/builder/remote.go deleted file mode 100644 index f3a4329d1..000000000 --- a/vendor/github.com/docker/docker/builder/remote.go +++ /dev/null @@ -1,157 +0,0 @@ -package builder - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "regexp" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/urlutil" -) - -// When downloading remote contexts, limit the amount (in bytes) -// to be read from the response body in order to detect its Content-Type -const maxPreambleLength = 100 - -const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` - -var mimeRe = regexp.MustCompile(acceptableRemoteMIME) - -// MakeRemoteContext downloads a context from remoteURL and returns it. -// -// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of -// maxPreambleLength bytes from the body to help detecting the MIME type. -// Look at acceptableRemoteMIME for more details. -// -// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected -// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). -// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. -func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { - f, err := httputils.Download(remoteURL) - if err != nil { - return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) - } - defer f.Body.Close() - - var contextReader io.ReadCloser - if contentTypeHandlers != nil { - contentType := f.Header.Get("Content-Type") - clen := f.ContentLength - - contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) - if err != nil { - return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) - } - defer contextReader.Close() - - // This loop tries to find a content-type handler for the detected content-type. - // If it could not find one from the caller-supplied map, it tries the empty content-type `""` - // which is interpreted as a fallback handler (usually used for raw tar contexts). - for _, ct := range []string{contentType, ""} { - if fn, ok := contentTypeHandlers[ct]; ok { - defer contextReader.Close() - if contextReader, err = fn(contextReader); err != nil { - return nil, err - } - break - } - } - } - - // Pass through - this is a pre-packaged context, presumably - // with a Dockerfile with the right name inside it. - return MakeTarSumContext(contextReader) -} - -// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used -// irrespective of user input. -// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). 
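ReadAll, from the dockerignore package removed above, trims each pattern, applies filepath.Clean and ToSlash, and skips comments and blank lines; a short sketch:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/docker/docker/builder/dockerignore"
)

func main() {
	f := strings.NewReader("# build artefacts\nbin/\n*.log\n\n.git\n")
	patterns, err := dockerignore.ReadAll(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(patterns) // [bin *.log .git]
}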
-func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) {
-	switch {
-	case remoteURL == "":
-		context, err = MakeTarSumContext(r)
-	case urlutil.IsGitURL(remoteURL):
-		context, err = MakeGitContext(remoteURL)
-	case urlutil.IsURL(remoteURL):
-		context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
-			httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
-				dockerfile, err := ioutil.ReadAll(rc)
-				if err != nil {
-					return nil, err
-				}
-
-				// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
-				// should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
-				dockerfileName = DefaultDockerfileName
-
-				// TODO: return a context without tarsum
-				r, err := archive.Generate(dockerfileName, string(dockerfile))
-				if err != nil {
-					return nil, err
-				}
-
-				return ioutil.NopCloser(r), nil
-			},
-			// fallback handler (tar context)
-			"": func(rc io.ReadCloser) (io.ReadCloser, error) {
-				return createProgressReader(rc), nil
-			},
-		})
-	default:
-		err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
-	}
-	return
-}
-
-// inspectResponse looks into the http response data at r to determine whether its
-// content-type is on the list of acceptable content types for remote build contexts.
-// This function returns:
-//    - a string representation of the detected content-type
-//    - an io.Reader for the response body
-//    - an error value which will be non-nil either when something goes wrong while
-//      reading bytes from r or when the detected content-type is not acceptable.
-func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
-	plen := clen
-	if plen <= 0 || plen > maxPreambleLength {
-		plen = maxPreambleLength
-	}
-
-	preamble := make([]byte, plen, plen)
-	rlen, err := r.Read(preamble)
-	if rlen == 0 {
-		return ct, r, errors.New("empty response")
-	}
-	if err != nil && err != io.EOF {
-		return ct, r, err
-	}
-
-	preambleR := bytes.NewReader(preamble)
-	bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r))
-	// Some web servers will use application/octet-stream as the default
-	// content type for files without an extension (e.g. 'Dockerfile')
-	// so if we receive this value we better check for text content
-	contentType := ct
-	if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
-		contentType, _, err = httputils.DetectContentType(preamble)
-		if err != nil {
-			return contentType, bodyReader, err
-		}
-	}
-
-	contentType = selectAcceptableMIME(contentType)
-	var cterr error
-	if len(contentType) == 0 {
-		cterr = fmt.Errorf("unsupported Content-Type %q", ct)
-		contentType = ct
-	}
-
-	return contentType, bodyReader, cterr
-}
-
-func selectAcceptableMIME(ct string) string {
-	return mimeRe.FindString(ct)
-}
diff --git a/vendor/github.com/docker/docker/builder/tarsum.go b/vendor/github.com/docker/docker/builder/tarsum.go
deleted file mode 100644
index 35054dcba..000000000
--- a/vendor/github.com/docker/docker/builder/tarsum.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package builder
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/symlink"
-	"github.com/docker/docker/pkg/tarsum"
-)
-
-type tarSumContext struct {
-	root string
-	sums tarsum.FileInfoSums
-}
-
-func (c *tarSumContext) Close() error {
-	return os.RemoveAll(c.root)
-}
-
-func convertPathError(err error, cleanpath string) error {
-	if err, ok := err.(*os.PathError); ok {
-		err.Path = cleanpath
-		return err
-	}
-	return err
-}
-
-func (c *tarSumContext) Open(path string) (io.ReadCloser, error) {
-	cleanpath, fullpath, err := c.normalize(path)
-	if err != nil {
-		return nil, err
-	}
-	r, err := os.Open(fullpath)
-	if err != nil {
-		return nil, convertPathError(err, cleanpath)
-	}
-	return r, nil
-}
-
-func (c *tarSumContext) Stat(path string) (string, FileInfo, error) {
-	cleanpath, fullpath, err := c.normalize(path)
-	if err != nil {
-		return "", nil, err
-	}
-
-	st, err := os.Lstat(fullpath)
-	if err != nil {
-		return "", nil, convertPathError(err, cleanpath)
-	}
-
-	rel, err := filepath.Rel(c.root, fullpath)
-	if err != nil {
-		return "", nil, convertPathError(err, cleanpath)
-	}
-
-	// We set sum to path by default for the case where GetFile returns nil.
-	// The usual case is if relative path is empty.
-	sum := path
-	// Use the checksum of the followed path(not the possible symlink) because
-	// this is the file that is actually copied.
-	if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
-		sum = tsInfo.Sum()
-	}
-	fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum}
-	return rel, fi, nil
-}
-
-// MakeTarSumContext returns a build Context from a tar stream.
-//
-// It extracts the tar stream to a temporary folder that is deleted as soon as
-// the Context is closed.
-// As the extraction happens, a tarsum is calculated for every file, and the set of
-// all those sums then becomes the source of truth for all operations on this Context.
-//
-// Closing tarStream has to be done by the caller.
-func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) {
-	root, err := ioutils.TempDir("", "docker-builder")
-	if err != nil {
-		return nil, err
-	}
-
-	tsc := &tarSumContext{root: root}
-
-	// Make sure we clean-up upon error. In the happy case the caller
-	// is expected to manage the clean-up
-	defer func() {
-		if err != nil {
-			tsc.Close()
-		}
-	}()
-
-	decompressedStream, err := archive.DecompressStream(tarStream)
-	if err != nil {
-		return nil, err
-	}
-
-	sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := chrootarchive.Untar(sum, root, nil); err != nil {
-		return nil, err
-	}
-
-	tsc.sums = sum.GetSums()
-
-	return tsc, nil
-}
-
-func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) {
-	cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
-	fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root)
-	if err != nil {
-		return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath)
-	}
-	_, err = os.Lstat(fullpath)
-	if err != nil {
-		return "", "", convertPathError(err, path)
-	}
-	return
-}
-
-func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error {
-	root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root))
-	return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error {
-		rel, err := filepath.Rel(c.root, fullpath)
-		if err != nil {
-			return err
-		}
-		if rel == "." {
-			return nil
-		}
-
-		sum := rel
-		if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
-			sum = tsInfo.Sum()
-		}
-		fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum}
-		if err := walkFn(rel, fi, nil); err != nil {
-			return err
-		}
-		return nil
-	})
-}
-
-func (c *tarSumContext) Remove(path string) error {
-	_, fullpath, err := c.normalize(path)
-	if err != nil {
-		return err
-	}
-	return os.RemoveAll(fullpath)
-}
diff --git a/vendor/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go
deleted file mode 100644
index d81bf86b7..000000000
--- a/vendor/github.com/docker/docker/cliconfig/config.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package cliconfig
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/cliconfig/configfile"
-	"github.com/docker/docker/pkg/homedir"
-)
-
-const (
-	// ConfigFileName is the name of config file
-	ConfigFileName = "config.json"
-	configFileDir  = ".docker"
-	oldConfigfile  = ".dockercfg"
-)
-
-var (
-	configDir = os.Getenv("DOCKER_CONFIG")
-)
-
-func init() {
-	if configDir == "" {
-		configDir = filepath.Join(homedir.Get(), configFileDir)
-	}
-}
-
-// ConfigDir returns the directory the configuration file is stored in
-func ConfigDir() string {
-	return configDir
-}
-
-// SetConfigDir sets the directory the configuration file is stored in
-func SetConfigDir(dir string) {
-	configDir = dir
-}
-
-// NewConfigFile initializes an empty configuration file for the given filename 'fn'
-func NewConfigFile(fn string) *configfile.ConfigFile {
-	return &configfile.ConfigFile{
-		AuthConfigs: make(map[string]types.AuthConfig),
-		HTTPHeaders: make(map[string]string),
-		Filename:    fn,
-	}
-}
-
-// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
-// a non-nested reader
-func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
-	configFile := configfile.ConfigFile{
-		AuthConfigs: make(map[string]types.AuthConfig),
-	}
-	err := configFile.LegacyLoadFromReader(configData)
-	return &configFile, err
-}
-
-// LoadFromReader is a convenience function that creates a ConfigFile object from
-// a reader
-func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
-	configFile := configfile.ConfigFile{
-		AuthConfigs: make(map[string]types.AuthConfig),
-	}
-	err := configFile.LoadFromReader(configData)
-	return &configFile, err
-}
-
-// Load reads the configuration files in the given directory, and sets up
-// the auth config information and returns values.
-// FIXME: use the internal golang config parser
-func Load(configDir string) (*configfile.ConfigFile, error) {
-	if configDir == "" {
-		configDir = ConfigDir()
-	}
-
-	configFile := configfile.ConfigFile{
-		AuthConfigs: make(map[string]types.AuthConfig),
-		Filename:    filepath.Join(configDir, ConfigFileName),
-	}
-
-	// Try happy path first - latest config file
-	if _, err := os.Stat(configFile.Filename); err == nil {
-		file, err := os.Open(configFile.Filename)
-		if err != nil {
-			return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err)
-		}
-		defer file.Close()
-		err = configFile.LoadFromReader(file)
-		if err != nil {
-			err = fmt.Errorf("%s - %v", configFile.Filename, err)
-		}
-		return &configFile, err
-	} else if !os.IsNotExist(err) {
-		// if file is there but we can't stat it for any reason other
-		// than it doesn't exist then stop
-		return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err)
-	}
-
-	// Can't find latest config file so check for the old one
-	confFile := filepath.Join(homedir.Get(), oldConfigfile)
-	if _, err := os.Stat(confFile); err != nil {
-		return &configFile, nil //missing file is not an error
-	}
-	file, err := os.Open(confFile)
-	if err != nil {
-		return &configFile, fmt.Errorf("%s - %v", confFile, err)
-	}
-	defer file.Close()
-	err = configFile.LegacyLoadFromReader(file)
-	if err != nil {
-		return &configFile, fmt.Errorf("%s - %v", confFile, err)
-	}
-
-	if configFile.HTTPHeaders == nil {
-		configFile.HTTPHeaders = map[string]string{}
-	}
-	return &configFile, nil
-}
diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file.go b/vendor/github.com/docker/docker/cliconfig/configfile/file.go
deleted file mode 100644
index 39097133a..000000000
--- a/vendor/github.com/docker/docker/cliconfig/configfile/file.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package configfile
-
-import (
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/api/types"
-)
-
-const (
-	// This constant is only used for really old config files when the
-	// URL wasn't saved as part of the config file and it was just
-	// assumed to be this value.
-	defaultIndexserver = "https://index.docker.io/v1/"
-)
-
-// ConfigFile ~/.docker/config.json file info
-type ConfigFile struct {
-	AuthConfigs          map[string]types.AuthConfig `json:"auths"`
-	HTTPHeaders          map[string]string           `json:"HttpHeaders,omitempty"`
-	PsFormat             string                      `json:"psFormat,omitempty"`
-	ImagesFormat         string                      `json:"imagesFormat,omitempty"`
-	NetworksFormat       string                      `json:"networksFormat,omitempty"`
-	VolumesFormat        string                      `json:"volumesFormat,omitempty"`
-	StatsFormat          string                      `json:"statsFormat,omitempty"`
-	DetachKeys           string                      `json:"detachKeys,omitempty"`
-	CredentialsStore     string                      `json:"credsStore,omitempty"`
-	CredentialHelpers    map[string]string           `json:"credHelpers,omitempty"`
-	Filename             string                      `json:"-"` // Note: for internal use only
-	ServiceInspectFormat string                      `json:"serviceInspectFormat,omitempty"`
-}
-
-// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
-// auth config information with given directory and populates the receiver object
-func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
-	b, err := ioutil.ReadAll(configData)
-	if err != nil {
-		return err
-	}
-
-	if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
-		arr := strings.Split(string(b), "\n")
-		if len(arr) < 2 {
-			return fmt.Errorf("The Auth config file is empty")
-		}
-		authConfig := types.AuthConfig{}
-		origAuth := strings.Split(arr[0], " = ")
-		if len(origAuth) != 2 {
-			return fmt.Errorf("Invalid Auth config file")
-		}
-		authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
-		if err != nil {
-			return err
-		}
-		authConfig.ServerAddress = defaultIndexserver
-		configFile.AuthConfigs[defaultIndexserver] = authConfig
-	} else {
-		for k, authConfig := range configFile.AuthConfigs {
-			authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
-			if err != nil {
-				return err
-			}
-			authConfig.Auth = ""
-			authConfig.ServerAddress = k
-			configFile.AuthConfigs[k] = authConfig
-		}
-	}
-	return nil
-}
-
-// LoadFromReader reads the configuration data given and sets up the auth config
-// information with given directory and populates the receiver object
-func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
-	if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
-		return err
-	}
-	var err error
-	for addr, ac := range configFile.AuthConfigs {
-		ac.Username, ac.Password, err = decodeAuth(ac.Auth)
-		if err != nil {
-			return err
-		}
-		ac.Auth = ""
-		ac.ServerAddress = addr
-		configFile.AuthConfigs[addr] = ac
-	}
-	return nil
-}
-
-// ContainsAuth returns whether there is authentication configured
-// in this file or not.
-func (configFile *ConfigFile) ContainsAuth() bool {
-	return configFile.CredentialsStore != "" ||
-		len(configFile.CredentialHelpers) > 0 ||
-		len(configFile.AuthConfigs) > 0
-}
-
-// SaveToWriter encodes and writes out all the authorization information to
-// the given writer
-func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
-	// Encode sensitive data into a new/temp struct
-	tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
-	for k, authConfig := range configFile.AuthConfigs {
-		authCopy := authConfig
-		// encode and save the authstring, while blanking out the original fields
-		authCopy.Auth = encodeAuth(&authCopy)
-		authCopy.Username = ""
-		authCopy.Password = ""
-		authCopy.ServerAddress = ""
-		tmpAuthConfigs[k] = authCopy
-	}
-
-	saveAuthConfigs := configFile.AuthConfigs
-	configFile.AuthConfigs = tmpAuthConfigs
-	defer func() { configFile.AuthConfigs = saveAuthConfigs }()
-
-	data, err := json.MarshalIndent(configFile, "", "\t")
-	if err != nil {
-		return err
-	}
-	_, err = writer.Write(data)
-	return err
-}
-
-// Save encodes and writes out all the authorization information
-func (configFile *ConfigFile) Save() error {
-	if configFile.Filename == "" {
-		return fmt.Errorf("Can't save config with empty filename")
-	}
-
-	if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil {
-		return err
-	}
-	f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	return configFile.SaveToWriter(f)
-}
-
-// encodeAuth creates a base64 encoded string to containing authorization information
-func encodeAuth(authConfig *types.AuthConfig) string {
-	if authConfig.Username == "" && authConfig.Password == "" {
-		return ""
-	}
-
-	authStr := authConfig.Username + ":" + authConfig.Password
-	msg := []byte(authStr)
-	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
-	base64.StdEncoding.Encode(encoded, msg)
-	return string(encoded)
-}
-
-// decodeAuth decodes a base64 encoded string and returns username and password
-func decodeAuth(authStr string) (string, string, error) {
-	if authStr == "" {
-		return "", "", nil
-	}
-
-	decLen := base64.StdEncoding.DecodedLen(len(authStr))
-	decoded := make([]byte, decLen)
-	authByte := []byte(authStr)
-	n, err := base64.StdEncoding.Decode(decoded, authByte)
-	if err != nil {
-		return "", "", err
-	}
-	if n > decLen {
-		return "", "", fmt.Errorf("Something went wrong decoding auth config")
-	}
-	arr := strings.SplitN(string(decoded), ":", 2)
-	if len(arr) != 2 {
-		return "", "", fmt.Errorf("Invalid auth configuration file")
-	}
-	password := strings.Trim(arr[1], "\x00")
-	return arr[0], password, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go
deleted file mode 100644
index 5ea604f5b..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package graphdriver
-
-import "sync"
-
-type minfo struct {
-	check bool
-	count int
-}
-
-// RefCounter is a generic counter for use by graphdriver Get/Put calls
-type RefCounter struct {
-	counts  map[string]*minfo
-	mu      sync.Mutex
-	checker Checker
-}
-
-// NewRefCounter returns a new RefCounter
-func NewRefCounter(c Checker) *RefCounter {
-	return &RefCounter{
-		checker: c,
-		counts:  make(map[string]*minfo),
-	}
-}
-
-// Increment increaes the ref count for the given id and returns the current count
-func (c *RefCounter) Increment(path string) int {
-	c.mu.Lock()
-	m := c.counts[path]
-	if m == nil {
-		m = &minfo{}
-		c.counts[path] = m
-	}
-	// if we are checking this path for the first time check to make sure
-	// if it was already mounted on the system and make sure we have a correct ref
-	// count if it is mounted as it is in use.
-	if !m.check {
-		m.check = true
-		if c.checker.IsMounted(path) {
-			m.count++
-		}
-	}
-	m.count++
-	c.mu.Unlock()
-	return m.count
-}
-
-// Decrement decreases the ref count for the given id and returns the current count
-func (c *RefCounter) Decrement(path string) int {
-	c.mu.Lock()
-	m := c.counts[path]
-	if m == nil {
-		m = &minfo{}
-		c.counts[path] = m
-	}
-	// if we are checking this path for the first time check to make sure
-	// if it was already mounted on the system and make sure we have a correct ref
-	// count if it is mounted as it is in use.
-	if !m.check {
-		m.check = true
-		if c.checker.IsMounted(path) {
-			m.count++
-		}
-	}
-	m.count--
-	c.mu.Unlock()
-	return m.count
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go
deleted file mode 100644
index f0bce562b..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package graphdriver
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/vbatts/tar-split/tar/storage"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/plugingetter"
-)
-
-// FsMagic unsigned id of the filesystem in use.
-type FsMagic uint32
-
-const (
-	// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
-	FsMagicUnsupported = FsMagic(0x00000000)
-)
-
-var (
-	// All registered drivers
-	drivers map[string]InitFunc
-
-	// ErrNotSupported returned when driver is not supported.
-	ErrNotSupported = errors.New("driver not supported")
-	// ErrPrerequisites retuned when driver does not meet prerequisites.
-	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
-	// ErrIncompatibleFS returned when file system is not supported.
-	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
-)
-
-//CreateOpts contains optional arguments for Create() and CreateReadWrite()
-// methods.
-type CreateOpts struct {
-	MountLabel string
-	StorageOpt map[string]string
-}
-
-// InitFunc initializes the storage driver.
-type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
-
-// ProtoDriver defines the basic capabilities of a driver.
-// This interface exists solely to be a minimum set of methods
-// for client code which choose not to implement the entire Driver
-// interface and use the NaiveDiffDriver wrapper constructor.
-//
-// Use of ProtoDriver directly by client code is not recommended.
-type ProtoDriver interface {
-	// String returns a string representation of this driver.
-	String() string
-	// CreateReadWrite creates a new, empty filesystem layer that is ready
-	// to be used as the storage for a container. Additional options can
-	// be passed in opts. parent may be "" and opts may be nil.
-	CreateReadWrite(id, parent string, opts *CreateOpts) error
-	// Create creates a new, empty, filesystem layer with the
-	// specified id and parent and options passed in opts. Parent
-	// may be "" and opts may be nil.
-	Create(id, parent string, opts *CreateOpts) error
-	// Remove attempts to remove the filesystem layer with this id.
-	Remove(id string) error
-	// Get returns the mountpoint for the layered filesystem referred
-	// to by this id. You can optionally specify a mountLabel or "".
-	// Returns the absolute path to the mounted layered filesystem.
-	Get(id, mountLabel string) (dir string, err error)
-	// Put releases the system resources for the specified id,
-	// e.g, unmounting layered filesystem.
-	Put(id string) error
-	// Exists returns whether a filesystem layer with the specified
-	// ID exists on this driver.
-	Exists(id string) bool
-	// Status returns a set of key-value pairs which give low
-	// level diagnostic status about this driver.
-	Status() [][2]string
-	// Returns a set of key-value pairs which give low level information
-	// about the image/container driver is managing.
-	GetMetadata(id string) (map[string]string, error)
-	// Cleanup performs necessary tasks to release resources
-	// held by the driver, e.g., unmounting all layered filesystems
-	// known to this driver.
-	Cleanup() error
-}
-
-// DiffDriver is the interface to use to implement graph diffs
-type DiffDriver interface {
-	// Diff produces an archive of the changes between the specified
-	// layer and its parent layer which may be "".
-	Diff(id, parent string) (io.ReadCloser, error)
-	// Changes produces a list of changes between the specified layer
-	// and its parent layer. If parent is "", then all changes will be ADD changes.
-	Changes(id, parent string) ([]archive.Change, error)
-	// ApplyDiff extracts the changeset from the given diff into the
-	// layer with the specified id and parent, returning the size of the
-	// new layer in bytes.
-	// The archive.Reader must be an uncompressed stream.
-	ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
-	// DiffSize calculates the changes between the specified id
-	// and its parent and returns the size in bytes of the changes
-	// relative to its base filesystem directory.
-	DiffSize(id, parent string) (size int64, err error)
-}
-
-// Driver is the interface for layered/snapshot file system drivers.
-type Driver interface {
-	ProtoDriver
-	DiffDriver
-}
-
-// DiffGetterDriver is the interface for layered file system drivers that
-// provide a specialized function for getting file contents for tar-split.
-type DiffGetterDriver interface {
-	Driver
-	// DiffGetter returns an interface to efficiently retrieve the contents
-	// of files in a layer.
-	DiffGetter(id string) (FileGetCloser, error)
-}
-
-// FileGetCloser extends the storage.FileGetter interface with a Close method
-// for cleaning up.
-type FileGetCloser interface {
-	storage.FileGetter
-	// Close cleans up any resources associated with the FileGetCloser.
-	Close() error
-}
-
-// Checker makes checks on specified filesystems.
-type Checker interface {
-	// IsMounted returns true if the provided path is mounted for the specific checker
-	IsMounted(path string) bool
-}
-
-func init() {
-	drivers = make(map[string]InitFunc)
-}
-
-// Register registers an InitFunc for the driver.
-func Register(name string, initFunc InitFunc) error {
-	if _, exists := drivers[name]; exists {
-		return fmt.Errorf("Name already registered %s", name)
-	}
-	drivers[name] = initFunc
-
-	return nil
-}
-
-// GetDriver initializes and returns the registered driver
-func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
-	if initFunc, exists := drivers[name]; exists {
-		return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
-	}
-
-	pluginDriver, err := lookupPlugin(name, pg, config)
-	if err == nil {
-		return pluginDriver, nil
-	}
-	logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph")
-	return nil, ErrNotSupported
-}
-
-// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
-func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
-	if initFunc, exists := drivers[name]; exists {
-		return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
-	}
-	logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
-	return nil, ErrNotSupported
-}
-
-// Options is used to initialize a graphdriver
-type Options struct {
-	Root                string
-	DriverOptions       []string
-	UIDMaps             []idtools.IDMap
-	GIDMaps             []idtools.IDMap
-	ExperimentalEnabled bool
-}
-
-// New creates the driver and initializes it at the specified root.
-func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
-	if name != "" {
-		logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver
-		return GetDriver(name, pg, config)
-	}
-
-	// Guess for prior driver
-	driversMap := scanPriorDrivers(config.Root)
-	for _, name := range priority {
-		if name == "vfs" {
-			// don't use vfs even if there is state present.
-			continue
-		}
-		if _, prior := driversMap[name]; prior {
-			// of the state found from prior drivers, check in order of our priority
-			// which we would prefer
-			driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
-			if err != nil {
-				// unlike below, we will return error here, because there is prior
-				// state, and now it is no longer supported/prereq/compatible, so
-				// something changed and needs attention. Otherwise the daemon's
-				// images would just "disappear".
- logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) - return nil, err - } - - // abort starting when there are other prior configured drivers - // to ensure the user explicitly selects the driver to load - if len(driversMap)-1 > 0 { - var driversSlice []string - for name := range driversMap { - driversSlice = append(driversSlice, name) - } - - return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) - } - - logrus.Infof("[graphdriver] using prior storage driver: %s", name) - return driver, nil - } - } - - // Check for priority drivers first - for _, name := range priority { - driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) - if err != nil { - if isDriverNotSupported(err) { - continue - } - return nil, err - } - return driver, nil - } - - // Check all registered drivers if no priority driver is found - for name, initFunc := range drivers { - driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) - if err != nil { - if isDriverNotSupported(err) { - continue - } - return nil, err - } - return driver, nil - } - return nil, fmt.Errorf("No supported storage backend found") -} - -// isDriverNotSupported returns true if the error initializing -// the graph driver is a non-supported error. -func isDriverNotSupported(err error) bool { - return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS -} - -// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers -func scanPriorDrivers(root string) map[string]bool { - driversMap := make(map[string]bool) - - for driver := range drivers { - p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil && driver != "vfs" { - driversMap[driver] = true - } - } - return driversMap -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go deleted file mode 100644 index 2891a84f3..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go +++ /dev/null @@ -1,19 +0,0 @@ -package graphdriver - -import "syscall" - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "zfs", - } -) - -// Mounted checks if the given path is mounted as the fs type -func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(mountPath, &buf); err != nil { - return false, err - } - return FsMagic(buf.Type) == fsType, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go deleted file mode 100644 index 5c8d0e230..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go +++ /dev/null @@ -1,135 +0,0 @@ -// +build linux - -package graphdriver - -import ( - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/mount" -) - -const ( - // FsMagicAufs filesystem id for Aufs - FsMagicAufs = FsMagic(0x61756673) - // FsMagicBtrfs filesystem id for Btrfs - FsMagicBtrfs = FsMagic(0x9123683E) - // FsMagicCramfs filesystem id for Cramfs - FsMagicCramfs = FsMagic(0x28cd3d45) - // FsMagicEcryptfs filesystem id for eCryptfs - FsMagicEcryptfs = FsMagic(0xf15f) - // FsMagicExtfs filesystem id for Extfs - FsMagicExtfs = FsMagic(0x0000EF53) - // 
FsMagicF2fs filesystem id for F2fs - FsMagicF2fs = FsMagic(0xF2F52010) - // FsMagicGPFS filesystem id for GPFS - FsMagicGPFS = FsMagic(0x47504653) - // FsMagicJffs2Fs filesystem if for Jffs2Fs - FsMagicJffs2Fs = FsMagic(0x000072b6) - // FsMagicJfs filesystem id for Jfs - FsMagicJfs = FsMagic(0x3153464a) - // FsMagicNfsFs filesystem id for NfsFs - FsMagicNfsFs = FsMagic(0x00006969) - // FsMagicRAMFs filesystem id for RamFs - FsMagicRAMFs = FsMagic(0x858458f6) - // FsMagicReiserFs filesystem id for ReiserFs - FsMagicReiserFs = FsMagic(0x52654973) - // FsMagicSmbFs filesystem id for SmbFs - FsMagicSmbFs = FsMagic(0x0000517B) - // FsMagicSquashFs filesystem id for SquashFs - FsMagicSquashFs = FsMagic(0x73717368) - // FsMagicTmpFs filesystem id for TmpFs - FsMagicTmpFs = FsMagic(0x01021994) - // FsMagicVxFS filesystem id for VxFs - FsMagicVxFS = FsMagic(0xa501fcf5) - // FsMagicXfs filesystem id for Xfs - FsMagicXfs = FsMagic(0x58465342) - // FsMagicZfs filesystem id for Zfs - FsMagicZfs = FsMagic(0x2fc12fc1) - // FsMagicOverlay filesystem id for overlay - FsMagicOverlay = FsMagic(0x794C7630) -) - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "aufs", - "btrfs", - "zfs", - "overlay2", - "overlay", - "devicemapper", - "vfs", - } - - // FsNames maps filesystem id to name of the filesystem. - FsNames = map[FsMagic]string{ - FsMagicAufs: "aufs", - FsMagicBtrfs: "btrfs", - FsMagicCramfs: "cramfs", - FsMagicExtfs: "extfs", - FsMagicF2fs: "f2fs", - FsMagicGPFS: "gpfs", - FsMagicJffs2Fs: "jffs2", - FsMagicJfs: "jfs", - FsMagicNfsFs: "nfs", - FsMagicOverlay: "overlayfs", - FsMagicRAMFs: "ramfs", - FsMagicReiserFs: "reiserfs", - FsMagicSmbFs: "smb", - FsMagicSquashFs: "squashfs", - FsMagicTmpFs: "tmpfs", - FsMagicUnsupported: "unsupported", - FsMagicVxFS: "vxfs", - FsMagicXfs: "xfs", - FsMagicZfs: "zfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { - return 0, err - } - return FsMagic(buf.Type), nil -} - -// NewFsChecker returns a checker configured for the provied FsMagic -func NewFsChecker(t FsMagic) Checker { - return &fsChecker{ - t: t, - } -} - -type fsChecker struct { - t FsMagic -} - -func (c *fsChecker) IsMounted(path string) bool { - m, _ := Mounted(c.t, path) - return m -} - -// NewDefaultChecker returns a check that parses /proc/mountinfo to check -// if the specified path is mounted. 
-func NewDefaultChecker() Checker {
-	return &defaultChecker{}
-}
-
-type defaultChecker struct {
-}
-
-func (c *defaultChecker) IsMounted(path string) bool {
-	m, _ := mount.Mounted(path)
-	return m
-}
-
-// Mounted checks if the given path is mounted as the fs type
-func Mounted(fsType FsMagic, mountPath string) (bool, error) {
-	var buf syscall.Statfs_t
-	if err := syscall.Statfs(mountPath, &buf); err != nil {
-		return false, err
-	}
-	return FsMagic(buf.Type) == fsType, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
deleted file mode 100644
index 7daf01c32..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build solaris,cgo
-
-package graphdriver
-
-/*
-#include <sys/statvfs.h>
-#include <stdlib.h>
-
-static inline struct statvfs *getstatfs(char *s) {
-	struct statvfs *buf;
-	int err;
-	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
-	err = statvfs(s, buf);
-	return buf;
-}
-*/
-import "C"
-import (
-	"path/filepath"
-	"unsafe"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/mount"
-)
-
-const (
-	// FsMagicZfs filesystem id for Zfs
-	FsMagicZfs = FsMagic(0x2fc12fc1)
-)
-
-var (
-	// Slice of drivers that should be used in an order
-	priority = []string{
-		"zfs",
-	}
-
-	// FsNames maps filesystem id to name of the filesystem.
-	FsNames = map[FsMagic]string{
-		FsMagicZfs: "zfs",
-	}
-)
-
-// GetFSMagic returns the filesystem id given the path.
-func GetFSMagic(rootpath string) (FsMagic, error) {
-	return 0, nil
-}
-
-type fsChecker struct {
-	t FsMagic
-}
-
-func (c *fsChecker) IsMounted(path string) bool {
-	m, _ := Mounted(c.t, path)
-	return m
-}
-
-// NewFsChecker returns a checker configured for the provied FsMagic
-func NewFsChecker(t FsMagic) Checker {
-	return &fsChecker{
-		t: t,
-	}
-}
-
-// NewDefaultChecker returns a check that parses /proc/mountinfo to check
-// if the specified path is mounted.
-// No-op on Solaris.
-func NewDefaultChecker() Checker {
-	return &defaultChecker{}
-}
-
-type defaultChecker struct {
-}
-
-func (c *defaultChecker) IsMounted(path string) bool {
-	m, _ := mount.Mounted(path)
-	return m
-}
-
-// Mounted checks if the given path is mounted as the fs type
-//Solaris supports only ZFS for now
-func Mounted(fsType FsMagic, mountPath string) (bool, error) {
-
-	cs := C.CString(filepath.Dir(mountPath))
-	buf := C.getstatfs(cs)
-
-	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
-	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
-		(buf.f_basetype[3] != 0) {
-		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
-		C.free(unsafe.Pointer(buf))
-		return false, ErrPrerequisites
-	}
-
-	C.free(unsafe.Pointer(buf))
-	C.free(unsafe.Pointer(cs))
-	return true, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
deleted file mode 100644
index 4a875608b..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !linux,!windows,!freebsd,!solaris
-
-package graphdriver
-
-var (
-	// Slice of drivers that should be used in an order
-	priority = []string{
-		"unsupported",
-	}
-)
-
-// GetFSMagic returns the filesystem id given the path.
-func GetFSMagic(rootpath string) (FsMagic, error) {
-	return FsMagicUnsupported, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
deleted file mode 100644
index ffd30c295..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package graphdriver
-
-var (
-	// Slice of drivers that should be used in order
-	priority = []string{
-		"windowsfilter",
-	}
-)
-
-// GetFSMagic returns the filesystem id given the path.
-func GetFSMagic(rootpath string) (FsMagic, error) {
-	// Note it is OK to return FsMagicUnsupported on Windows.
-	return FsMagicUnsupported, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
deleted file mode 100644
index 20826cd7d..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package graphdriver
-
-import (
-	"io"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/ioutils"
-)
-
-var (
-	// ApplyUncompressedLayer defines the unpack method used by the graph
-	// driver.
-	ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
-)
-
-// NaiveDiffDriver takes a ProtoDriver and adds the
-// capability of the Diffing methods which it may or may not
-// support on its own. See the comment on the exported
-// NewNaiveDiffDriver function below.
-// Notably, the AUFS driver doesn't need to be wrapped like this.
-type NaiveDiffDriver struct {
-	ProtoDriver
-	uidMaps []idtools.IDMap
-	gidMaps []idtools.IDMap
-}
-
-// NewNaiveDiffDriver returns a fully functional driver that wraps the
-// given ProtoDriver and adds the capability of the following methods which
-// it may or may not support on its own:
-//     Diff(id, parent string) (archive.Archive, error)
-//     Changes(id, parent string) ([]archive.Change, error)
-//     ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
-//     DiffSize(id, parent string) (size int64, err error)
-func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
-	return &NaiveDiffDriver{ProtoDriver: driver,
-		uidMaps: uidMaps,
-		gidMaps: gidMaps}
-}
-
-// Diff produces an archive of the changes between the specified
-// layer and its parent layer which may be "".
-func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
-	startTime := time.Now()
-	driver := gdw.ProtoDriver
-
-	layerFs, err := driver.Get(id, "")
-	if err != nil {
-		return nil, err
-	}
-
-	defer func() {
-		if err != nil {
-			driver.Put(id)
-		}
-	}()
-
-	if parent == "" {
-		archive, err := archive.Tar(layerFs, archive.Uncompressed)
-		if err != nil {
-			return nil, err
-		}
-		return ioutils.NewReadCloserWrapper(archive, func() error {
-			err := archive.Close()
-			driver.Put(id)
-			return err
-		}), nil
-	}
-
-	parentFs, err := driver.Get(parent, "")
-	if err != nil {
-		return nil, err
-	}
-	defer driver.Put(parent)
-
-	changes, err := archive.ChangesDirs(layerFs, parentFs)
-	if err != nil {
-		return nil, err
-	}
-
-	archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
-	if err != nil {
-		return nil, err
-	}
-
-	return ioutils.NewReadCloserWrapper(archive, func() error {
-		err := archive.Close()
-		driver.Put(id)
-
-		// NaiveDiffDriver compares file metadata with parent layers. Parent layers
-		// are extracted from tar's with full second precision on modified time.
-		// We need this hack here to make sure calls within same second receive
-		// correct result.
-		time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
-		return err
-	}), nil
-}
-
-// Changes produces a list of changes between the specified layer
-// and its parent layer. If parent is "", then all changes will be ADD changes.
-func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
-	driver := gdw.ProtoDriver
-
-	layerFs, err := driver.Get(id, "")
-	if err != nil {
-		return nil, err
-	}
-	defer driver.Put(id)
-
-	parentFs := ""
-
-	if parent != "" {
-		parentFs, err = driver.Get(parent, "")
-		if err != nil {
-			return nil, err
-		}
-		defer driver.Put(parent)
-	}
-
-	return archive.ChangesDirs(layerFs, parentFs)
-}
-
-// ApplyDiff extracts the changeset from the given diff into the
-// layer with the specified id and parent, returning the size of the
-// new layer in bytes.
-func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
-	driver := gdw.ProtoDriver
-
-	// Mount the root filesystem so we can apply the diff/layer.
-	layerFs, err := driver.Get(id, "")
-	if err != nil {
-		return
-	}
-	defer driver.Put(id)
-
-	options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
-		GIDMaps: gdw.gidMaps}
-	start := time.Now().UTC()
-	logrus.Debug("Start untar layer")
-	if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
-		return
-	}
-	logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
-
-	return
-}
-
-// DiffSize calculates the changes between the specified layer
-// and its parent and returns the size in bytes of the changes
-// relative to its base filesystem directory.
-func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
-	driver := gdw.ProtoDriver
-
-	changes, err := gdw.Changes(id, parent)
-	if err != nil {
-		return
-	}
-
-	layerFs, err := driver.Get(id, "")
-	if err != nil {
-		return
-	}
-	defer driver.Put(id)
-
-	return archive.ChangesSize(layerFs, changes), nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
deleted file mode 100644
index 7294bcc5f..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package graphdriver
-
-import (
-	"fmt"
-	"io"
-	"path/filepath"
-
-	"github.com/docker/docker/pkg/plugingetter"
-	"github.com/docker/docker/plugin/v2"
-)
-
-type pluginClient interface {
-	// Call calls the specified method with the specified arguments for the plugin.
-	Call(string, interface{}, interface{}) error
-	// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
-	Stream(string, interface{}) (io.ReadCloser, error)
-	// SendFile calls the specified method, and passes through the IO stream
-	SendFile(string, io.Reader, interface{}) error
-}
-
-func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
-	if !config.ExperimentalEnabled {
-		return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode")
-	}
-	pl, err := pg.Get(name, "GraphDriver", plugingetter.ACQUIRE)
-	if err != nil {
-		return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
-	}
-	return newPluginDriver(name, pl, config)
-}
-
-func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) {
-	home := config.Root
-	if !pl.IsV1() {
-		if p, ok := pl.(*v2.Plugin); ok {
-			if p.PropagatedMount != "" {
-				home = p.PluginObj.Config.PropagatedMount
-			}
-		}
-	}
-	proxy := &graphDriverProxy{name, pl}
-	return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
deleted file mode 100644
index bfe74cc6f..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package graphdriver
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"path/filepath"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/plugingetter"
-)
-
-type graphDriverProxy struct {
-	name string
-	p    plugingetter.CompatPlugin
-}
-
-type graphDriverRequest struct {
-	ID         string            `json:",omitempty"`
-	Parent     string            `json:",omitempty"`
-	MountLabel string            `json:",omitempty"`
-	StorageOpt map[string]string `json:",omitempty"`
-}
-
-type graphDriverResponse struct {
-	Err      string            `json:",omitempty"`
-	Dir      string            `json:",omitempty"`
-	Exists   bool              `json:",omitempty"`
-	Status   [][2]string       `json:",omitempty"`
-	Changes  []archive.Change  `json:",omitempty"`
-	Size     int64             `json:",omitempty"`
-	Metadata map[string]string `json:",omitempty"`
-}
-
-type graphDriverInitRequest struct {
-	Home    string
-	Opts    []string        `json:"Opts"`
-	UIDMaps []idtools.IDMap `json:"UIDMaps"`
-	GIDMaps []idtools.IDMap `json:"GIDMaps"`
-}
-
-func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error {
-	if !d.p.IsV1() {
-		if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
-			// always acquire here, it will be cleaned up on daemon shutdown
-			cp.Acquire()
-		}
-	}
-	args := &graphDriverInitRequest{
-		Home:    home,
-		Opts:    opts,
-		UIDMaps: uidMaps,
-		GIDMaps: gidMaps,
-	}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil {
-		return err
-	}
-	if ret.Err != "" {
-		return errors.New(ret.Err)
-	}
-	return nil
-}
-
-func (d *graphDriverProxy) String() string {
-	return d.name
-}
-
-func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error {
-	args := &graphDriverRequest{
-		ID:     id,
-		Parent: parent,
-	}
-	if opts != nil {
-		args.MountLabel = opts.MountLabel
-		args.StorageOpt = opts.StorageOpt
-	}
-
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
-		return err
-	}
-	if ret.Err != "" {
-		return errors.New(ret.Err)
-	}
-	return nil
-}
-
-func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
-	args := &graphDriverRequest{
-		ID:     id,
-		Parent: parent,
-	}
-	if opts != nil {
-		args.MountLabel = opts.MountLabel
-		args.StorageOpt = opts.StorageOpt
-	}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil {
-		return err
-	}
-	if ret.Err != "" {
-		return errors.New(ret.Err)
-	}
-	return nil
-}
-
-func (d *graphDriverProxy) Remove(id string) error {
-	args := &graphDriverRequest{ID: id}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil {
-		return err
-	}
-	if ret.Err != "" {
-		return errors.New(ret.Err)
-	}
-	return nil
-}
-
-func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
-	args := &graphDriverRequest{
-		ID:         id,
-		MountLabel: mountLabel,
-	}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil {
-		return "", err
-	}
-	var err error
-	if ret.Err != "" {
-		err = errors.New(ret.Err)
-	}
-	return filepath.Join(d.p.BasePath(), ret.Dir), err
-}
-
-func (d *graphDriverProxy) Put(id string) error {
-	args := &graphDriverRequest{ID: id}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil {
-		return err
-	}
-	if ret.Err != "" {
-		return errors.New(ret.Err)
-	}
-	return nil
-}
-
-func (d *graphDriverProxy) Exists(id string) bool {
-	args := &graphDriverRequest{ID: id}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil {
-		return false
-	}
-	return ret.Exists
-}
-
-func (d *graphDriverProxy) Status() [][2]string {
-	args := &graphDriverRequest{}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil {
-		return nil
-	}
-	return ret.Status
-}
-
-func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
-	args := &graphDriverRequest{
-		ID: id,
-	}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil {
-		return nil, err
-	}
-	if ret.Err != "" {
-		return nil, errors.New(ret.Err)
-	}
-	return ret.Metadata, nil
-}
-
-func (d *graphDriverProxy) Cleanup() error {
-	if !d.p.IsV1() {
-		if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
-			// always release
-			defer cp.Release()
-		}
-	}
-
-	args := &graphDriverRequest{}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil {
-		return nil
-	}
-	if ret.Err != "" {
-		return errors.New(ret.Err)
-	}
-	return nil
-}
-
-func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
-	args := &graphDriverRequest{
-		ID:     id,
-		Parent: parent,
-	}
-	body, err := d.p.Client().Stream("GraphDriver.Diff", args)
-	if err != nil {
-		return nil, err
-	}
-	return body, nil
-}
-
-func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
-	args := &graphDriverRequest{
-		ID:     id,
-		Parent: parent,
-	}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil {
-		return nil, err
-	}
-	if ret.Err != "" {
-		return nil, errors.New(ret.Err)
-	}
-
-	return ret.Changes, nil
-}
-
-func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
-	var ret graphDriverResponse
-	if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
-		return -1, err
-	}
-	if ret.Err != "" {
-		return -1, errors.New(ret.Err)
-	}
-	return ret.Size, nil
-}
-
-func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
-	args := &graphDriverRequest{
-		ID:     id,
-		Parent: parent,
-	}
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil {
-		return -1, err
-	}
-	if ret.Err != "" {
-		return -1, errors.New(ret.Err)
-	}
-	return ret.Size, nil
-}
diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go
deleted file mode 100644
index 39cfbf5d7..000000000
--- a/vendor/github.com/docker/docker/image/fs.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package image
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/pkg/ioutils"
-)
-
-// DigestWalkFunc is function called by StoreBackend.Walk
-type DigestWalkFunc func(id digest.Digest) error
-
-// StoreBackend provides interface for image.Store persistence
-type StoreBackend interface {
-	Walk(f DigestWalkFunc) error
-	Get(id digest.Digest) ([]byte, error)
-	Set(data []byte) (digest.Digest, error)
-	Delete(id digest.Digest) error
-	SetMetadata(id digest.Digest, key string, data []byte) error
-	GetMetadata(id digest.Digest, key string) ([]byte, error)
-	DeleteMetadata(id digest.Digest, key string) error
-}
-
-// fs implements StoreBackend using the filesystem.
-type fs struct {
-	sync.RWMutex
-	root string
-}
-
-const (
-	contentDirName  = "content"
-	metadataDirName = "metadata"
-)
-
-// NewFSStoreBackend returns new filesystem based backend for image.Store
-func NewFSStoreBackend(root string) (StoreBackend, error) {
-	return newFSStore(root)
-}
-
-func newFSStore(root string) (*fs, error) {
-	s := &fs{
-		root: root,
-	}
-	if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
-		return nil, err
-	}
-	if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
-		return nil, err
-	}
-	return s, nil
-}
-
-func (s *fs) contentFile(dgst digest.Digest) string {
-	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
-}
-
-func (s *fs) metadataDir(dgst digest.Digest) string {
-	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
-}
-
-// Walk calls the supplied callback for each image ID in the storage backend.
-func (s *fs) Walk(f DigestWalkFunc) error {
-	// Only Canonical digest (sha256) is currently supported
-	s.RLock()
-	dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
-	s.RUnlock()
-	if err != nil {
-		return err
-	}
-	for _, v := range dir {
-		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
-		if err := dgst.Validate(); err != nil {
-			logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
-			continue
-		}
-		if err := f(dgst); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Get returns the content stored under a given digest.
-func (s *fs) Get(dgst digest.Digest) ([]byte, error) {
-	s.RLock()
-	defer s.RUnlock()
-
-	return s.get(dgst)
-}
-
-func (s *fs) get(dgst digest.Digest) ([]byte, error) {
-	content, err := ioutil.ReadFile(s.contentFile(dgst))
-	if err != nil {
-		return nil, err
-	}
-
-	// todo: maybe optional
-	if digest.FromBytes(content) != dgst {
-		return nil, fmt.Errorf("failed to verify: %v", dgst)
-	}
-
-	return content, nil
-}
-
-// Set stores content by checksum.
-func (s *fs) Set(data []byte) (digest.Digest, error) {
-	s.Lock()
-	defer s.Unlock()
-
-	if len(data) == 0 {
-		return "", fmt.Errorf("Invalid empty data")
-	}
-
-	dgst := digest.FromBytes(data)
-	if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil {
-		return "", err
-	}
-
-	return dgst, nil
-}
-
-// Delete removes content and metadata files associated with the digest.
-func (s *fs) Delete(dgst digest.Digest) error {
-	s.Lock()
-	defer s.Unlock()
-
-	if err := os.RemoveAll(s.metadataDir(dgst)); err != nil {
-		return err
-	}
-	if err := os.Remove(s.contentFile(dgst)); err != nil {
-		return err
-	}
-	return nil
-}
-
-// SetMetadata sets metadata for a given ID. It fails if there's no base file.
-func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error {
-	s.Lock()
-	defer s.Unlock()
-	if _, err := s.get(dgst); err != nil {
-		return err
-	}
-
-	baseDir := filepath.Join(s.metadataDir(dgst))
-	if err := os.MkdirAll(baseDir, 0700); err != nil {
-		return err
-	}
-	return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600)
-}
-
-// GetMetadata returns metadata for a given digest.
-func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) {
-	s.RLock()
-	defer s.RUnlock()
-
-	if _, err := s.get(dgst); err != nil {
-		return nil, err
-	}
-	return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key))
-}
-
-// DeleteMetadata removes the metadata associated with a digest.
-func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error {
-	s.Lock()
-	defer s.Unlock()
-
-	return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key))
-}
diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go
deleted file mode 100644
index 29a990a55..000000000
--- a/vendor/github.com/docker/docker/image/image.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package image
-
-import (
-	"encoding/json"
-	"errors"
-	"io"
-	"time"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/api/types/container"
-)
-
-// ID is the content-addressable ID of an image.
-type ID digest.Digest
-
-func (id ID) String() string {
-	return id.Digest().String()
-}
-
-// Digest converts ID into a digest
-func (id ID) Digest() digest.Digest {
-	return digest.Digest(id)
-}
-
-// IDFromDigest creates an ID from a digest
-func IDFromDigest(digest digest.Digest) ID {
-	return ID(digest)
-}
-
-// V1Image stores the V1 image configuration.
-type V1Image struct {
-	// ID a unique 64 character identifier of the image
-	ID string `json:"id,omitempty"`
-	// Parent id of the image
-	Parent string `json:"parent,omitempty"`
-	// Comment user added comment
-	Comment string `json:"comment,omitempty"`
-	// Created timestamp when image was created
-	Created time.Time `json:"created"`
-	// Container is the id of the container used to commit
-	Container string `json:"container,omitempty"`
-	// ContainerConfig is the configuration of the container that is committed into the image
-	ContainerConfig container.Config `json:"container_config,omitempty"`
-	// DockerVersion specifies version on which image is built
-	DockerVersion string `json:"docker_version,omitempty"`
-	// Author of the image
-	Author string `json:"author,omitempty"`
-	// Config is the configuration of the container received from the client
-	Config *container.Config `json:"config,omitempty"`
-	// Architecture is the hardware that the image is build and runs on
-	Architecture string `json:"architecture,omitempty"`
-	// OS is the operating system used to build and run the image
-	OS string `json:"os,omitempty"`
-	// Size is the total size of the image including all layers it is composed of
-	Size int64 `json:",omitempty"`
-}
-
-// Image stores the image configuration
-type Image struct {
-	V1Image
-	Parent     ID        `json:"parent,omitempty"`
-	RootFS     *RootFS   `json:"rootfs,omitempty"`
-	History    []History `json:"history,omitempty"`
-	OSVersion  string    `json:"os.version,omitempty"`
-	OSFeatures []string  `json:"os.features,omitempty"`
-
-	// rawJSON caches the immutable JSON associated with this image.
-	rawJSON []byte
-
-	// computedID is the ID computed from the hash of the image config.
-	// Not to be confused with the legacy V1 ID in V1Image.
-	computedID ID
-}
-
-// RawJSON returns the immutable JSON associated with the image.
-func (img *Image) RawJSON() []byte {
-	return img.rawJSON
-}
-
-// ID returns the image's content-addressable ID.
-func (img *Image) ID() ID {
-	return img.computedID
-}
-
-// ImageID stringifies ID.
-func (img *Image) ImageID() string {
-	return img.ID().String()
-}
-
-// RunConfig returns the image's container config.
-func (img *Image) RunConfig() *container.Config {
-	return img.Config
-}
-
-// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
-// that JSON that's been manipulated by a push/pull cycle with a legacy
-// registry won't end up with a different key order.
-func (img *Image) MarshalJSON() ([]byte, error) {
-	type MarshalImage Image
-
-	pass1, err := json.Marshal(MarshalImage(*img))
-	if err != nil {
-		return nil, err
-	}
-
-	var c map[string]*json.RawMessage
-	if err := json.Unmarshal(pass1, &c); err != nil {
-		return nil, err
-	}
-	return json.Marshal(c)
-}
-
-// History stores build commands that were used to create an image
-type History struct {
-	// Created timestamp for build point
-	Created time.Time `json:"created"`
-	// Author of the build point
-	Author string `json:"author,omitempty"`
-	// CreatedBy keeps the Dockerfile command used while building image.
-	CreatedBy string `json:"created_by,omitempty"`
-	// Comment is custom message set by the user when creating the image.
-	Comment string `json:"comment,omitempty"`
-	// EmptyLayer is set to true if this history item did not generate a
-	// layer. Otherwise, the history item is associated with the next
-	// layer in the RootFS section.
- EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Exporter provides interface for exporting and importing images -type Exporter interface { - Load(io.ReadCloser, io.Writer, bool) error - // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error - Save([]string, io.Writer) error -} - -// NewFromJSON creates an Image configuration from json. -func NewFromJSON(src []byte) (*Image, error) { - img := &Image{} - - if err := json.Unmarshal(src, img); err != nil { - return nil, err - } - if img.RootFS == nil { - return nil, errors.New("Invalid image JSON, no RootFS key.") - } - - img.rawJSON = src - - return img, nil -} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go deleted file mode 100644 index 7b24e3ed1..000000000 --- a/vendor/github.com/docker/docker/image/rootfs.go +++ /dev/null @@ -1,44 +0,0 @@ -package image - -import ( - "runtime" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/layer" -) - -// TypeLayers is used for RootFS.Type for filesystems organized into layers. -const TypeLayers = "layers" - -// typeLayersWithBase is an older format used by Windows up to v1.12. We -// explicitly handle this as an error case to ensure that a daemon which still -// has an older image like this on disk can still start, even though the -// image itself is not usable. See https://github.com/docker/docker/pull/25806. -const typeLayersWithBase = "layers+base" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. -type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` -} - -// NewRootFS returns empty RootFS struct -func NewRootFS() *RootFS { - return &RootFS{Type: TypeLayers} -} - -// Append appends a new diffID to rootfs -func (r *RootFS) Append(id layer.DiffID) { - r.DiffIDs = append(r.DiffIDs, id) -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { - logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs) - return "" - } - return layer.CreateChainID(r.DiffIDs) -} diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go deleted file mode 100644 index b61c45609..000000000 --- a/vendor/github.com/docker/docker/image/store.go +++ /dev/null @@ -1,295 +0,0 @@ -package image - -import ( - "encoding/json" - "errors" - "fmt" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -// Store is an interface for creating and accessing images -type Store interface { - Create(config []byte) (ID, error) - Get(id ID) (*Image, error) - Delete(id ID) ([]layer.Metadata, error) - Search(partialID string) (ID, error) - SetParent(id ID, parent ID) error - GetParent(id ID) (ID, error) - Children(id ID) []ID - Map() map[ID]*Image - Heads() map[ID]*Image -} - -// LayerGetReleaser is a minimal interface for getting and releasing images. 
-type LayerGetReleaser interface { - Get(layer.ChainID) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type imageMeta struct { - layer layer.Layer - children map[ID]struct{} -} - -type store struct { - sync.Mutex - ls LayerGetReleaser - images map[ID]*imageMeta - fs StoreBackend - digestSet *digest.Set -} - -// NewImageStore returns new store object for given layer store -func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { - is := &store{ - ls: ls, - images: make(map[ID]*imageMeta), - fs: fs, - digestSet: digest.NewSet(), - } - - // load all current images and retain layers - if err := is.restore(); err != nil { - return nil, err - } - - return is, nil -} - -func (is *store) restore() error { - err := is.fs.Walk(func(dgst digest.Digest) error { - img, err := is.Get(IDFromDigest(dgst)) - if err != nil { - logrus.Errorf("invalid image %v, %v", dgst, err) - return nil - } - var l layer.Layer - if chainID := img.RootFS.ChainID(); chainID != "" { - l, err = is.ls.Get(chainID) - if err != nil { - return err - } - } - if err := is.digestSet.Add(dgst); err != nil { - return err - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[IDFromDigest(dgst)] = imageMeta - - return nil - }) - if err != nil { - return err - } - - // Second pass to fill in children maps - for id := range is.images { - if parent, err := is.GetParent(id); err == nil { - if parentMeta := is.images[parent]; parentMeta != nil { - parentMeta.children[id] = struct{}{} - } - } - } - - return nil -} - -func (is *store) Create(config []byte) (ID, error) { - var img Image - err := json.Unmarshal(config, &img) - if err != nil { - return "", err - } - - // Must reject any config that references diffIDs from the history - // which aren't among the rootfs layers. 
- rootFSLayers := make(map[layer.DiffID]struct{}) - for _, diffID := range img.RootFS.DiffIDs { - rootFSLayers[diffID] = struct{}{} - } - - layerCounter := 0 - for _, h := range img.History { - if !h.EmptyLayer { - layerCounter++ - } - } - if layerCounter > len(img.RootFS.DiffIDs) { - return "", errors.New("too many non-empty layers in History section") - } - - dgst, err := is.fs.Set(config) - if err != nil { - return "", err - } - imageID := IDFromDigest(dgst) - - is.Lock() - defer is.Unlock() - - if _, exists := is.images[imageID]; exists { - return imageID, nil - } - - layerID := img.RootFS.ChainID() - - var l layer.Layer - if layerID != "" { - l, err = is.ls.Get(layerID) - if err != nil { - return "", err - } - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[imageID] = imageMeta - if err := is.digestSet.Add(imageID.Digest()); err != nil { - delete(is.images, imageID) - return "", err - } - - return imageID, nil -} - -func (is *store) Search(term string) (ID, error) { - is.Lock() - defer is.Unlock() - - dgst, err := is.digestSet.Lookup(term) - if err != nil { - if err == digest.ErrDigestNotFound { - err = fmt.Errorf("No such image: %s", term) - } - return "", err - } - return IDFromDigest(dgst), nil -} - -func (is *store) Get(id ID) (*Image, error) { - // todo: Check if image is in images - // todo: Detect manual insertions and start using them - config, err := is.fs.Get(id.Digest()) - if err != nil { - return nil, err - } - - img, err := NewFromJSON(config) - if err != nil { - return nil, err - } - img.computedID = id - - img.Parent, err = is.GetParent(id) - if err != nil { - img.Parent = "" - } - - return img, nil -} - -func (is *store) Delete(id ID) ([]layer.Metadata, error) { - is.Lock() - defer is.Unlock() - - imageMeta := is.images[id] - if imageMeta == nil { - return nil, fmt.Errorf("unrecognized image ID %s", id.String()) - } - for id := range imageMeta.children { - is.fs.DeleteMetadata(id.Digest(), "parent") - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - - if err := is.digestSet.Remove(id.Digest()); err != nil { - logrus.Errorf("error removing %s from digest set: %q", id, err) - } - delete(is.images, id) - is.fs.Delete(id.Digest()) - - if imageMeta.layer != nil { - return is.ls.Release(imageMeta.layer) - } - return nil, nil -} - -func (is *store) SetParent(id, parent ID) error { - is.Lock() - defer is.Unlock() - parentMeta := is.images[parent] - if parentMeta == nil { - return fmt.Errorf("unknown parent image ID %s", parent.String()) - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - parentMeta.children[id] = struct{}{} - return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) -} - -func (is *store) GetParent(id ID) (ID, error) { - d, err := is.fs.GetMetadata(id.Digest(), "parent") - if err != nil { - return "", err - } - return ID(d), nil // todo: validate? 
-} - -func (is *store) Children(id ID) []ID { - is.Lock() - defer is.Unlock() - - return is.children(id) -} - -func (is *store) children(id ID) []ID { - var ids []ID - if is.images[id] != nil { - for id := range is.images[id].children { - ids = append(ids, id) - } - } - return ids -} - -func (is *store) Heads() map[ID]*Image { - return is.imagesMap(false) -} - -func (is *store) Map() map[ID]*Image { - return is.imagesMap(true) -} - -func (is *store) imagesMap(all bool) map[ID]*Image { - is.Lock() - defer is.Unlock() - - images := make(map[ID]*Image) - - for id := range is.images { - if !all && len(is.children(id)) > 0 { - continue - } - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image access: %q, error: %q", id, err) - continue - } - images[id] = img - } - return images -} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go deleted file mode 100644 index d498ddbc0..000000000 --- a/vendor/github.com/docker/docker/image/v1/imagev1.go +++ /dev/null @@ -1,156 +0,0 @@ -package v1 - -import ( - "encoding/json" - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" -) - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -// noFallbackMinVersion is the minimum version for which v1compatibility -// information will not be marshaled through the Image struct to remove -// blank fields. -var noFallbackMinVersion = "1.8.3" - -// HistoryFromConfig creates a History struct from v1 configuration JSON -func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { - h := image.History{} - var v1Image image.V1Image - if err := json.Unmarshal(imageJSON, &v1Image); err != nil { - return h, err - } - - return image.History{ - Author: v1Image.Author, - Created: v1Image.Created, - CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), - Comment: v1Image.Comment, - EmptyLayer: emptyLayer, - }, nil -} - -// CreateID creates an ID from v1 image, layerID and parent ID. -// Used for backwards compatibility with old clients. -func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { - v1Image.ID = "" - v1JSON, err := json.Marshal(v1Image) - if err != nil { - return "", err - } - - var config map[string]*json.RawMessage - if err := json.Unmarshal(v1JSON, &config); err != nil { - return "", err - } - - // FIXME: note that this is slightly incompatible with RootFS logic - config["layer_id"] = rawJSON(layerID) - if parent != "" { - config["parent"] = rawJSON(parent) - } - - configJSON, err := json.Marshal(config) - if err != nil { - return "", err - } - logrus.Debugf("CreateV1ID %s", configJSON) - - return digest.FromBytes(configJSON), nil -} - -// MakeConfigFromV1Config creates an image config from the legacy V1 config format. 
-func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { - var dver struct { - DockerVersion string `json:"docker_version"` - } - - if err := json.Unmarshal(imageJSON, &dver); err != nil { - return nil, err - } - - useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) - - if useFallback { - var v1Image image.V1Image - err := json.Unmarshal(imageJSON, &v1Image) - if err != nil { - return nil, err - } - imageJSON, err = json.Marshal(v1Image) - if err != nil { - return nil, err - } - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(imageJSON, &c); err != nil { - return nil, err - } - - delete(c, "id") - delete(c, "parent") - delete(c, "Size") // Size is calculated from data on disk and is inconsistent - delete(c, "parent_id") - delete(c, "layer_id") - delete(c, "throwaway") - - c["rootfs"] = rawJSON(rootfs) - c["history"] = rawJSON(history) - - return json.Marshal(c) -} - -// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct -func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Top-level v1compatibility string should be a modified version of the - // image config. - var configAsMap map[string]*json.RawMessage - if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { - return nil, err - } - - // Delete fields that didn't exist in old manifest - imageType := reflect.TypeOf(img).Elem() - for i := 0; i < imageType.NumField(); i++ { - f := imageType.Field(i) - jsonName := strings.Split(f.Tag.Get("json"), ",")[0] - // Parent is handled specially below. - if jsonName != "" && jsonName != "parent" { - delete(configAsMap, jsonName) - } - } - configAsMap["id"] = rawJSON(v1ID) - if parentV1ID != "" { - configAsMap["parent"] = rawJSON(parentV1ID) - } - if throwaway { - configAsMap["throwaway"] = rawJSON(true) - } - - return json.Marshal(configAsMap) -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go deleted file mode 100644 index 3b6ffc82f..000000000 --- a/vendor/github.com/docker/docker/layer/empty.go +++ /dev/null @@ -1,56 +0,0 @@ -package layer - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" -) - -// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - -// (1024 NULL bytes) -const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") - -type emptyLayer struct{} - -// EmptyLayer is a layer that corresponds to empty tar. 
-var EmptyLayer = &emptyLayer{} - -func (el *emptyLayer) TarStream() (io.ReadCloser, error) { - buf := new(bytes.Buffer) - tarWriter := tar.NewWriter(buf) - tarWriter.Close() - return ioutil.NopCloser(buf), nil -} - -func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { - if p == "" { - return el.TarStream() - } - return nil, fmt.Errorf("can't get parent tar stream of an empty layer") -} - -func (el *emptyLayer) ChainID() ChainID { - return ChainID(DigestSHA256EmptyTar) -} - -func (el *emptyLayer) DiffID() DiffID { - return DigestSHA256EmptyTar -} - -func (el *emptyLayer) Parent() Layer { - return nil -} - -func (el *emptyLayer) Size() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) DiffSize() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) Metadata() (map[string]string, error) { - return make(map[string]string), nil -} diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go deleted file mode 100644 index 42b45556e..000000000 --- a/vendor/github.com/docker/docker/layer/filestore.go +++ /dev/null @@ -1,354 +0,0 @@ -package layer - -import ( - "compress/gzip" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/ioutils" -) - -var ( - stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) - supportedAlgorithms = []digest.Algorithm{ - digest.SHA256, - // digest.SHA384, // Currently not used - // digest.SHA512, // Currently not used - } -) - -type fileMetadataStore struct { - root string -} - -type fileMetadataTransaction struct { - store *fileMetadataStore - ws *ioutils.AtomicWriteSet -} - -// NewFSMetadataStore returns an instance of a metadata store -// which is backed by files on disk using the provided root -// as the root of metadata files. 
-func NewFSMetadataStore(root string) (MetadataStore, error) { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - return &fileMetadataStore{ - root: root, - }, nil -} - -func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { - dgst := digest.Digest(layer) - return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) -} - -func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { - return filepath.Join(fms.getLayerDirectory(layer), filename) -} - -func (fms *fileMetadataStore) getMountDirectory(mount string) string { - return filepath.Join(fms.root, "mounts", mount) -} - -func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { - return filepath.Join(fms.getMountDirectory(mount), filename) -} - -func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { - tmpDir := filepath.Join(fms.root, "tmp") - if err := os.MkdirAll(tmpDir, 0755); err != nil { - return nil, err - } - ws, err := ioutils.NewAtomicWriteSet(tmpDir) - if err != nil { - return nil, err - } - - return &fileMetadataTransaction{ - store: fms, - ws: ws, - }, nil -} - -func (fm *fileMetadataTransaction) SetSize(size int64) error { - content := fmt.Sprintf("%d", size) - return fm.ws.WriteFile("size", []byte(content), 0644) -} - -func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { - return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { - return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { - return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) -} - -func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { - jsonRef, err := json.Marshal(ref) - if err != nil { - return err - } - return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) -} - -func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { - f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - var wc io.WriteCloser - if compressInput { - wc = gzip.NewWriter(f) - } else { - wc = f - } - - return ioutils.NewWriteCloserWrapper(wc, func() error { - wc.Close() - return f.Close() - }), nil -} - -func (fm *fileMetadataTransaction) Commit(layer ChainID) error { - finalDir := fm.store.getLayerDirectory(layer) - if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { - return err - } - - return fm.ws.Commit(finalDir) -} - -func (fm *fileMetadataTransaction) Cancel() error { - return fm.ws.Cancel() -} - -func (fm *fileMetadataTransaction) String() string { - return fm.ws.String() -} - -func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) - if err != nil { - return 0, err - } - - size, err := strconv.ParseInt(string(content), 10, 64) - if err != nil { - return 0, err - } - - return size, nil -} - -func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) GetDiffID(layer 
ChainID) (DiffID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) - if err != nil { - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return DiffID(dgst), nil -} - -func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid cache id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) - if err != nil { - if os.IsNotExist(err) { - // only return empty descriptor to represent what is stored - return distribution.Descriptor{}, nil - } - return distribution.Descriptor{}, err - } - - var ref distribution.Descriptor - err = json.Unmarshal(content, &ref) - if err != nil { - return distribution.Descriptor{}, err - } - return ref, err -} - -func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { - fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) - if err != nil { - return nil, err - } - f, err := gzip.NewReader(fz) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(f, func() error { - f.Close() - return fz.Close() - }), nil -} - -func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) -} - -func (fms *fileMetadataStore) SetInitID(mount string, init string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) -} - -func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid mount id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid init id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - 
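// The fileMetadataStore above persists each layer's metadata as one small
// plain-text file per key under <root>/<algorithm>/<hex>/ ("size", "parent",
// "diff", "cache-id", plus "descriptor.json" and "tar-split.json.gz"), and
// mount metadata under <root>/mounts/<name>/; writes go through an
// AtomicWriteSet transaction so a layer directory only appears once it is
// complete. A minimal sketch of reading the plain-text keys back, assuming
// the same file-per-key layout; readLayerMetadata and the example root path
// are illustrative and not part of the store's API:
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

// readLayerMetadata loads the plain-text metadata files for a single layer,
// identified by digest algorithm and hex, into a map keyed by file name.
func readLayerMetadata(root, algorithm, hex string) (map[string]string, error) {
	dir := filepath.Join(root, algorithm, hex)
	meta := make(map[string]string)
	for _, key := range []string{"size", "parent", "diff", "cache-id"} {
		content, err := ioutil.ReadFile(filepath.Join(dir, key))
		if os.IsNotExist(err) {
			continue // "parent" is absent for base layers
		}
		if err != nil {
			return nil, err
		}
		meta[key] = strings.TrimSpace(string(content))
	}
	return meta, nil
}

func main() {
	// Example root; a real daemon derives it from its configured graph driver.
	meta, err := readLayerMetadata("/var/lib/docker/image/overlay/layerdb", "sha256", "<layer hex>")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("diff=%s parent=%s size=%s\n", meta["diff"], meta["parent"], meta["size"])
}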
-func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { - var ids []ChainID - for _, algorithm := range supportedAlgorithms { - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) - if err != nil { - if os.IsNotExist(err) { - continue - } - return nil, nil, err - } - - for _, fi := range fileInfos { - if fi.IsDir() && fi.Name() != "mounts" { - dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) - } else { - ids = append(ids, ChainID(dgst)) - } - } - } - } - - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) - if err != nil { - if os.IsNotExist(err) { - return ids, []string{}, nil - } - return nil, nil, err - } - - var mounts []string - for _, fi := range fileInfos { - if fi.IsDir() { - mounts = append(mounts, fi.Name()) - } - } - - return ids, mounts, nil -} - -func (fms *fileMetadataStore) Remove(layer ChainID) error { - return os.RemoveAll(fms.getLayerDirectory(layer)) -} - -func (fms *fileMetadataStore) RemoveMount(mount string) error { - return os.RemoveAll(fms.getMountDirectory(mount)) -} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go deleted file mode 100644 index ec1d4346d..000000000 --- a/vendor/github.com/docker/docker/layer/layer.go +++ /dev/null @@ -1,275 +0,0 @@ -// Package layer is package for managing read-only -// and read-write mounts on the union file system -// driver. Read-only mounts are referenced using a -// content hash and are protected from mutation in -// the exposed interface. The tar format is used -// to create read-only layers and export both -// read-only and writable layers. The exported -// tar data for a read-only layer should match -// the tar used to create the layer. -package layer - -import ( - "errors" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/archive" -) - -var ( - // ErrLayerDoesNotExist is used when an operation is - // attempted on a layer which does not exist. - ErrLayerDoesNotExist = errors.New("layer does not exist") - - // ErrLayerNotRetained is used when a release is - // attempted on a layer which is not retained. - ErrLayerNotRetained = errors.New("layer not retained") - - // ErrMountDoesNotExist is used when an operation is - // attempted on a mount layer which does not exist. - ErrMountDoesNotExist = errors.New("mount does not exist") - - // ErrMountNameConflict is used when a mount is attempted - // to be created but there is already a mount with the name - // used for creation. - ErrMountNameConflict = errors.New("mount already exists with name") - - // ErrActiveMount is used when an operation on a - // mount is attempted but the layer is still - // mounted and the operation cannot be performed. - ErrActiveMount = errors.New("mount still active") - - // ErrNotMounted is used when requesting an active - // mount but the layer is not mounted. - ErrNotMounted = errors.New("not mounted") - - // ErrMaxDepthExceeded is used when a layer is attempted - // to be created which would result in a layer depth - // greater than the 125 max. - ErrMaxDepthExceeded = errors.New("max depth exceeded") - - // ErrNotSupported is used when the action is not supported - // on the current platform - ErrNotSupported = errors.New("not support on this platform") -) - -// ChainID is the content-addressable ID of a layer. 
-type ChainID digest.Digest - -// String returns a string rendition of a layer ID -func (id ChainID) String() string { - return string(id) -} - -// DiffID is the hash of an individual layer tar. -type DiffID digest.Digest - -// String returns a string rendition of a layer DiffID -func (diffID DiffID) String() string { - return string(diffID) -} - -// TarStreamer represents an object which may -// have its contents exported as a tar stream. -type TarStreamer interface { - // TarStream returns a tar archive stream - // for the contents of a layer. - TarStream() (io.ReadCloser, error) -} - -// Layer represents a read-only layer -type Layer interface { - TarStreamer - - // TarStreamFrom returns a tar archive stream for all the layer chain with - // arbitrary depth. - TarStreamFrom(ChainID) (io.ReadCloser, error) - - // ChainID returns the content hash of the entire layer chain. The hash - // chain is made up of DiffID of top layer and all of its parents. - ChainID() ChainID - - // DiffID returns the content hash of the layer - // tar stream used to create this layer. - DiffID() DiffID - - // Parent returns the next layer in the layer chain. - Parent() Layer - - // Size returns the size of the entire layer chain. The size - // is calculated from the total size of all files in the layers. - Size() (int64, error) - - // DiffSize returns the size difference of the top layer - // from parent layer. - DiffSize() (int64, error) - - // Metadata returns the low level storage metadata associated - // with layer. - Metadata() (map[string]string, error) -} - -// RWLayer represents a layer which is -// read and writable -type RWLayer interface { - TarStreamer - - // Name of mounted layer - Name() string - - // Parent returns the layer which the writable - // layer was created from. - Parent() Layer - - // Mount mounts the RWLayer and returns the filesystem path - // the to the writable layer. - Mount(mountLabel string) (string, error) - - // Unmount unmounts the RWLayer. This should be called - // for every mount. If there are multiple mount calls - // this operation will only decrement the internal mount counter. - Unmount() error - - // Size represents the size of the writable layer - // as calculated by the total size of the files - // changed in the mutable layer. - Size() (int64, error) - - // Changes returns the set of changes for the mutable layer - // from the base layer. - Changes() ([]archive.Change, error) - - // Metadata returns the low level metadata for the mutable layer - Metadata() (map[string]string, error) -} - -// Metadata holds information about a -// read-only layer -type Metadata struct { - // ChainID is the content hash of the layer - ChainID ChainID - - // DiffID is the hash of the tar data used to - // create the layer - DiffID DiffID - - // Size is the size of the layer and all parents - Size int64 - - // DiffSize is the size of the top layer - DiffSize int64 -} - -// MountInit is a function to initialize a -// writable mount. Changes made here will -// not be included in the Tar stream of the -// RWLayer. -type MountInit func(root string) error - -// Store represents a backend for managing both -// read-only and read-write layers. 
-type Store interface { - Register(io.Reader, ChainID) (Layer, error) - Get(ChainID) (Layer, error) - Map() map[ChainID]Layer - Release(Layer) ([]Metadata, error) - - CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) - GetRWLayer(id string) (RWLayer, error) - GetMountID(id string) (string, error) - ReleaseRWLayer(RWLayer) ([]Metadata, error) - - Cleanup() error - DriverStatus() [][2]string - DriverName() string -} - -// DescribableStore represents a layer store capable of storing -// descriptors for layers. -type DescribableStore interface { - RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) -} - -// MetadataTransaction represents functions for setting layer metadata -// with a single transaction. -type MetadataTransaction interface { - SetSize(int64) error - SetParent(parent ChainID) error - SetDiffID(DiffID) error - SetCacheID(string) error - SetDescriptor(distribution.Descriptor) error - TarSplitWriter(compressInput bool) (io.WriteCloser, error) - - Commit(ChainID) error - Cancel() error - String() string -} - -// MetadataStore represents a backend for persisting -// metadata about layers and providing the metadata -// for restoring a Store. -type MetadataStore interface { - // StartTransaction starts an update for new metadata - // which will be used to represent an ID on commit. - StartTransaction() (MetadataTransaction, error) - - GetSize(ChainID) (int64, error) - GetParent(ChainID) (ChainID, error) - GetDiffID(ChainID) (DiffID, error) - GetCacheID(ChainID) (string, error) - GetDescriptor(ChainID) (distribution.Descriptor, error) - TarSplitReader(ChainID) (io.ReadCloser, error) - - SetMountID(string, string) error - SetInitID(string, string) error - SetMountParent(string, ChainID) error - - GetMountID(string) (string, error) - GetInitID(string) (string, error) - GetMountParent(string) (ChainID, error) - - // List returns the full list of referenced - // read-only and read-write layers - List() ([]ChainID, []string, error) - - Remove(ChainID) error - RemoveMount(string) error -} - -// CreateChainID returns ID for a layerDigest slice -func CreateChainID(dgsts []DiffID) ChainID { - return createChainIDFromParent("", dgsts...) -} - -func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { - if len(dgsts) == 0 { - return parent - } - if parent == "" { - return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) - } - // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) 
-} - -// ReleaseAndLog releases the provided layer from the given layer -// store, logging any error and release metadata -func ReleaseAndLog(ls Store, l Layer) { - metadata, err := ls.Release(l) - if err != nil { - logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) - } - LogReleaseMetadata(metadata) -} - -// LogReleaseMetadata logs a metadata array, uses this to -// ensure consistent logging for release metadata -func LogReleaseMetadata(metadatas []Metadata) { - for _, metadata := range metadatas { - logrus.Infof("Layer %s cleaned up", metadata.ChainID) - } -} diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go deleted file mode 100644 index 1a1ff9fe5..000000000 --- a/vendor/github.com/docker/docker/layer/layer_store.go +++ /dev/null @@ -1,684 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/stringid" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// maxLayerDepth represents the maximum number of -// layers which can be chained together. 125 was -// chosen to account for the 127 max in some -// graphdrivers plus the 2 additional layers -// used to create a rwlayer. -const maxLayerDepth = 125 - -type layerStore struct { - store MetadataStore - driver graphdriver.Driver - - layerMap map[ChainID]*roLayer - layerL sync.Mutex - - mounts map[string]*mountedLayer - mountL sync.Mutex -} - -// StoreOptions are the options used to create a new Store instance -type StoreOptions struct { - StorePath string - MetadataStorePathTemplate string - GraphDriver string - GraphDriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - PluginGetter plugingetter.PluginGetter - ExperimentalEnabled bool -} - -// NewStoreFromOptions creates a new Store instance -func NewStoreFromOptions(options StoreOptions) (Store, error) { - driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ - Root: options.StorePath, - DriverOptions: options.GraphDriverOptions, - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - ExperimentalEnabled: options.ExperimentalEnabled, - }) - if err != nil { - return nil, fmt.Errorf("error initializing graphdriver: %v", err) - } - logrus.Debugf("Using graph driver %s", driver) - - fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) - if err != nil { - return nil, err - } - - return NewStoreFromGraphDriver(fms, driver) -} - -// NewStoreFromGraphDriver creates a new Store instance using the provided -// metadata store and graph driver. The metadata store will be used to restore -// the Store. 
-func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { - ls := &layerStore{ - store: store, - driver: driver, - layerMap: map[ChainID]*roLayer{}, - mounts: map[string]*mountedLayer{}, - } - - ids, mounts, err := store.List() - if err != nil { - return nil, err - } - - for _, id := range ids { - l, err := ls.loadLayer(id) - if err != nil { - logrus.Debugf("Failed to load layer %s: %s", id, err) - continue - } - if l.parent != nil { - l.parent.referenceCount++ - } - } - - for _, mount := range mounts { - if err := ls.loadMount(mount); err != nil { - logrus.Debugf("Failed to load mount %s: %s", mount, err) - } - } - - return ls, nil -} - -func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { - cl, ok := ls.layerMap[layer] - if ok { - return cl, nil - } - - diff, err := ls.store.GetDiffID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) - } - - size, err := ls.store.GetSize(layer) - if err != nil { - return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) - } - - cacheID, err := ls.store.GetCacheID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) - } - - parent, err := ls.store.GetParent(layer) - if err != nil { - return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) - } - - descriptor, err := ls.store.GetDescriptor(layer) - if err != nil { - return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) - } - - cl = &roLayer{ - chainID: layer, - diffID: diff, - size: size, - cacheID: cacheID, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return nil, err - } - cl.parent = p - } - - ls.layerMap[cl.chainID] = cl - - return cl, nil -} - -func (ls *layerStore) loadMount(mount string) error { - if _, ok := ls.mounts[mount]; ok { - return nil - } - - mountID, err := ls.store.GetMountID(mount) - if err != nil { - return err - } - - initID, err := ls.store.GetInitID(mount) - if err != nil { - return err - } - - parent, err := ls.store.GetMountParent(mount) - if err != nil { - return err - } - - ml := &mountedLayer{ - name: mount, - mountID: mountID, - initID: initID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return err - } - ml.parent = p - - p.referenceCount++ - } - - ls.mounts[ml.name] = ml - - return nil -} - -func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { - digester := digest.Canonical.New() - tr := io.TeeReader(ts, digester.Hash()) - - tsw, err := tx.TarSplitWriter(true) - if err != nil { - return err - } - metaPacker := storage.NewJSONPacker(tsw) - defer tsw.Close() - - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) - if err != nil { - return err - } - - applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) - if err != nil { - return err - } - - // Discard trailing data but ensure metadata is picked up to reconstruct stream - io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed - - layer.size = applySize - layer.diffID = DiffID(digester.Digest()) - - logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) - - return nil -} - -func (ls *layerStore) 
Register(ts io.Reader, parent ChainID) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) -} - -func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an error returned - // to the caller (already exists). - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - if p.depth() >= maxLayerDepth { - err = ErrMaxDepthExceeded - return nil, err - } - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: stringid.GenerateRandomID(), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { - return nil, err - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) - if err := ls.driver.Remove(layer.cacheID); err != nil { - logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) - } - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - if err = ls.applyTar(tx, ts, pid, layer); err != nil { - return nil, err - } - - if layer.parent == nil { - layer.chainID = ChainID(layer.diffID) - } else { - layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return the error - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { - l, ok := ls.layerMap[layer] - if !ok { - return nil - } - - l.referenceCount++ - - return l -} - -func (ls *layerStore) get(l ChainID) *roLayer { - ls.layerL.Lock() - defer ls.layerL.Unlock() - return ls.getWithoutLock(l) -} - -func (ls *layerStore) Get(l ChainID) (Layer, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - - layer := ls.getWithoutLock(l) - if layer == nil { - return nil, ErrLayerDoesNotExist - } - - return layer.getReference(), nil -} - -func (ls *layerStore) Map() map[ChainID]Layer { - ls.layerL.Lock() - defer ls.layerL.Unlock() - - layers := map[ChainID]Layer{} - - for k, v := range ls.layerMap { - layers[k] = v - } - - return layers -} - -func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { - err := ls.driver.Remove(layer.cacheID) - if err != nil { - return err - } - - err = ls.store.Remove(layer.chainID) - if err != nil { - return err - } - metadata.DiffID = layer.diffID - metadata.ChainID = layer.chainID - metadata.Size, err = layer.Size() - if err != nil { - return err - } - metadata.DiffSize = layer.size - - return nil -} - -func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { - 
depth := 0 - removed := []Metadata{} - for { - if l.referenceCount == 0 { - panic("layer not retained") - } - l.referenceCount-- - if l.referenceCount != 0 { - return removed, nil - } - - if len(removed) == 0 && depth > 0 { - panic("cannot remove layer with child") - } - if l.hasReferences() { - panic("cannot delete referenced layer") - } - var metadata Metadata - if err := ls.deleteLayer(l, &metadata); err != nil { - return nil, err - } - - delete(ls.layerMap, l.chainID) - removed = append(removed, metadata) - - if l.parent == nil { - return removed, nil - } - - depth++ - l = l.parent - } -} - -func (ls *layerStore) Release(l Layer) ([]Metadata, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - layer, ok := ls.layerMap[l.ChainID()] - if !ok { - return []Metadata{}, nil - } - if !layer.hasReference(l) { - return nil, ErrLayerNotRetained - } - - layer.deleteReference(l) - - return ls.releaseLayer(layer) -} - -func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - return nil, ErrMountNameConflict - } - - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - m = &mountedLayer{ - name: name, - parent: p, - mountID: ls.mountID(name), - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if initFunc != nil { - pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) - if err != nil { - return nil, err - } - m.initID = pid - } - - createOpts := &graphdriver.CreateOpts{ - StorageOpt: storageOpt, - } - - if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { - return nil, err - } - - if err = ls.saveMount(m); err != nil { - return nil, err - } - - return m.getReference(), nil -} - -func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return nil, ErrMountDoesNotExist - } - - return mount.getReference(), nil -} - -func (ls *layerStore) GetMountID(id string) (string, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return "", ErrMountDoesNotExist - } - logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) - - return mount.mountID, nil -} - -func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[l.Name()] - if !ok { - return []Metadata{}, nil - } - - if err := m.deleteReference(l); err != nil { - return nil, err - } - - if m.hasReferences() { - return []Metadata{}, nil - } - - if err := ls.driver.Remove(m.mountID); err != nil { - logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - if m.initID != "" { - if err := ls.driver.Remove(m.initID); err != nil { - logrus.Errorf("Error removing init layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - } - - if err := ls.store.RemoveMount(m.name); err != nil { - logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - delete(ls.mounts, m.Name()) - - ls.layerL.Lock() - defer 
ls.layerL.Unlock() - if m.parent != nil { - return ls.releaseLayer(m.parent) - } - - return []Metadata{}, nil -} - -func (ls *layerStore) saveMount(mount *mountedLayer) error { - if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { - return err - } - - if mount.initID != "" { - if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { - return err - } - } - - if mount.parent != nil { - if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { - return err - } - } - - ls.mounts[mount.name] = mount - - return nil -} - -func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { - // Use "-init" to maintain compatibility with graph drivers - // which are expecting this layer with this special name. If all - // graph drivers can be updated to not rely on knowing about this layer - // then the initID should be randomly generated. - initID := fmt.Sprintf("%s-init", graphID) - - createOpts := &graphdriver.CreateOpts{ - MountLabel: mountLabel, - StorageOpt: storageOpt, - } - - if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { - return "", err - } - p, err := ls.driver.Get(initID, "") - if err != nil { - return "", err - } - - if err := initFunc(p); err != nil { - ls.driver.Put(initID) - return "", err - } - - if err := ls.driver.Put(initID); err != nil { - return "", err - } - - return initID, nil -} - -func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { - diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) - if !ok { - diffDriver = &naiveDiffPathDriver{ls.driver} - } - - defer metadata.Close() - - // get our relative path to the container - fileGetCloser, err := diffDriver.DiffGetter(graphID) - if err != nil { - return err - } - defer fileGetCloser.Close() - - metaUnpacker := storage.NewJSONUnpacker(metadata) - upackerCounter := &unpackSizeCounter{metaUnpacker, size} - logrus.Debugf("Assembling tar data for %s", graphID) - return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) -} - -func (ls *layerStore) Cleanup() error { - return ls.driver.Cleanup() -} - -func (ls *layerStore) DriverStatus() [][2]string { - return ls.driver.Status() -} - -func (ls *layerStore) DriverName() string { - return ls.driver.String() -} - -type naiveDiffPathDriver struct { - graphdriver.Driver -} - -type fileGetPutter struct { - storage.FileGetter - driver graphdriver.Driver - id string -} - -func (w *fileGetPutter) Close() error { - return w.driver.Put(w.id) -} - -func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p, err := n.Driver.Get(id, "") - if err != nil { - return nil, err - } - return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil -} diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go b/vendor/github.com/docker/docker/layer/layer_store_windows.go deleted file mode 100644 index 1276a912c..000000000 --- a/vendor/github.com/docker/docker/layer/layer_store_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package layer - -import ( - "io" - - "github.com/docker/distribution" -) - -func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, descriptor) -} diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go deleted file mode 100644 index 
776b78ac0..000000000 --- a/vendor/github.com/docker/docker/layer/layer_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd darwin openbsd solaris - -package layer - -import "github.com/docker/docker/pkg/stringid" - -func (ls *layerStore) mountID(name string) string { - return stringid.GenerateRandomID() -} diff --git a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go deleted file mode 100644 index e20311a09..000000000 --- a/vendor/github.com/docker/docker/layer/layer_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" -) - -// GetLayerPath returns the path to a layer -func GetLayerPath(s Store, layer ChainID) (string, error) { - ls, ok := s.(*layerStore) - if !ok { - return "", errors.New("unsupported layer store") - } - ls.layerL.Lock() - defer ls.layerL.Unlock() - - rl, ok := ls.layerMap[layer] - if !ok { - return "", ErrLayerDoesNotExist - } - - path, err := ls.driver.Get(rl.cacheID, "") - if err != nil { - return "", err - } - - if err := ls.driver.Put(rl.cacheID); err != nil { - return "", err - } - - return path, nil -} - -func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { - var err error // this is used for cleanup in existingLayer case - diffID := digest.FromBytes([]byte(graphID)) - - // Create new roLayer - layer := &roLayer{ - cacheID: graphID, - diffID: DiffID(diffID), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - size: size, - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - defer func() { - if err != nil { - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - layer.chainID = createChainIDFromParent("", layer.diffID) - - if !ls.driver.Exists(layer.cacheID) { - return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) - } - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) mountID(name string) string { - // windows has issues if container ID doesn't match mount ID - return name -} - -func (ls *layerStore) GraphDriver() graphdriver.Driver { - return ls.driver -} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go deleted file mode 100644 index b45c31099..000000000 --- a/vendor/github.com/docker/docker/layer/migration.go +++ /dev/null @@ -1,256 +0,0 @@ -package layer - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// CreateRWLayerByGraphID creates a RWLayer in the layer store using -// the provided name with the given graphID. To get the RWLayer -// after migration the layer may be retrieved by the given name. 
-func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - if m.parent.chainID != parent { - return errors.New("name conflict, mismatched parent") - } - if m.mountID != graphID { - return errors.New("mount already exists") - } - - return nil - } - - if !ls.driver.Exists(graphID) { - return fmt.Errorf("graph ID does not exist: %q", graphID) - } - - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // TODO: Ensure graphID has correct parent - - m = &mountedLayer{ - name: name, - parent: p, - mountID: graphID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - // Check for existing init layer - initID := fmt.Sprintf("%s-init", graphID) - if ls.driver.Exists(initID) { - m.initID = initID - } - - if err = ls.saveMount(m); err != nil { - return err - } - - return nil -} - -func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { - defer func() { - if err != nil { - logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) - diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) - } - }() - - if oldTarDataPath == "" { - err = errors.New("no tar-split file") - return - } - - tarDataFile, err := os.Open(oldTarDataPath) - if err != nil { - return - } - defer tarDataFile.Close() - uncompressed, err := gzip.NewReader(tarDataFile) - if err != nil { - return - } - - dgst := digest.Canonical.New() - err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) - if err != nil { - return - } - - diffID = DiffID(dgst.Digest()) - err = os.RemoveAll(newTarDataPath) - if err != nil { - return - } - err = os.Link(oldTarDataPath, newTarDataPath) - - return -} - -func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { - rawarchive, err := ls.driver.Diff(id, parent) - if err != nil { - return - } - defer rawarchive.Close() - - f, err := os.Create(newTarDataPath) - if err != nil { - return - } - defer f.Close() - mfz := gzip.NewWriter(f) - defer mfz.Close() - metaPacker := storage.NewJSONPacker(mfz) - - packerCounter := &packSizeCounter{metaPacker, &size} - - archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) - if err != nil { - return - } - dgst, err := digest.FromReader(archive) - if err != nil { - return - } - diffID = DiffID(dgst) - return -} - -func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an error returned - // to the caller (already exists). 
- var err error - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: graphID, - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - diffID: diffID, - size: size, - chainID: createChainIDFromParent(parent, diffID), - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - tsw, err := tx.TarSplitWriter(false) - if err != nil { - return nil, err - } - defer tsw.Close() - tdf, err := os.Open(tarDataFile) - if err != nil { - return nil, err - } - defer tdf.Close() - _, err = io.Copy(tsw, tdf) - if err != nil { - return nil, err - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -type unpackSizeCounter struct { - unpacker storage.Unpacker - size *int64 -} - -func (u *unpackSizeCounter) Next() (*storage.Entry, error) { - e, err := u.unpacker.Next() - if err == nil && u.size != nil { - *u.size += e.Size - } - return e, err -} - -type packSizeCounter struct { - packer storage.Packer - size *int64 -} - -func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { - n, err := p.packer.AddEntry(e) - if err == nil && p.size != nil { - *p.size += e.Size - } - return n, err -} diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go deleted file mode 100644 index a5cfcfa9b..000000000 --- a/vendor/github.com/docker/docker/layer/mounted_layer.go +++ /dev/null @@ -1,99 +0,0 @@ -package layer - -import ( - "io" - - "github.com/docker/docker/pkg/archive" -) - -type mountedLayer struct { - name string - mountID string - initID string - parent *roLayer - path string - layerStore *layerStore - - references map[RWLayer]*referencedRWLayer -} - -func (ml *mountedLayer) cacheParent() string { - if ml.initID != "" { - return ml.initID - } - if ml.parent != nil { - return ml.parent.cacheID - } - return "" -} - -func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { - return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Name() string { - return ml.name -} - -func (ml *mountedLayer) Parent() Layer { - if ml.parent != nil { - return ml.parent - } - - // Return a nil interface instead of an interface wrapping a nil - // pointer. 
- return nil -} - -func (ml *mountedLayer) Size() (int64, error) { - return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Changes() ([]archive.Change, error) { - return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Metadata() (map[string]string, error) { - return ml.layerStore.driver.GetMetadata(ml.mountID) -} - -func (ml *mountedLayer) getReference() RWLayer { - ref := &referencedRWLayer{ - mountedLayer: ml, - } - ml.references[ref] = ref - - return ref -} - -func (ml *mountedLayer) hasReferences() bool { - return len(ml.references) > 0 -} - -func (ml *mountedLayer) deleteReference(ref RWLayer) error { - if _, ok := ml.references[ref]; !ok { - return ErrLayerNotRetained - } - delete(ml.references, ref) - return nil -} - -func (ml *mountedLayer) retakeReference(r RWLayer) { - if ref, ok := r.(*referencedRWLayer); ok { - ml.references[ref] = ref - } -} - -type referencedRWLayer struct { - *mountedLayer -} - -func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { - return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) -} - -// Unmount decrements the activity count and unmounts the underlying layer. -// Callers should only call `Unmount` once per call to `Mount`, even on error. -func (rl *referencedRWLayer) Unmount() error { - return rl.layerStore.driver.Put(rl.mountedLayer.mountID) -} diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go deleted file mode 100644 index 7c8d233a3..000000000 --- a/vendor/github.com/docker/docker/layer/ro_layer.go +++ /dev/null @@ -1,192 +0,0 @@ -package layer - -import ( - "fmt" - "io" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" -) - -type roLayer struct { - chainID ChainID - diffID DiffID - parent *roLayer - cacheID string - size int64 - layerStore *layerStore - descriptor distribution.Descriptor - - referenceCount int - references map[Layer]struct{} -} - -// TarStream for roLayer guarantees that the data that is produced is the exact -// data that the layer was registered with. -func (rl *roLayer) TarStream() (io.ReadCloser, error) { - r, err := rl.layerStore.store.TarSplitReader(rl.chainID) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) - if err != nil { - pw.CloseWithError(err) - } else { - pw.Close() - } - }() - rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) - if err != nil { - return nil, err - } - return rc, nil - } - -// TarStreamFrom does not make any guarantees as to the correctness of the produced -// data. As such it should not be used when the layer content must be verified -// to be an exact match to the registered layer.
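roLayer.TarStream above uses a common Go streaming pattern: a producer goroutine writes into an io.Pipe and reports failure through CloseWithError, so the consumer sees the error on its next Read instead of a silent truncation (the digest check itself lives in verifiedReadCloser further down in this file). A self-contained sketch, with a hypothetical helper name:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
)

// pipeFromProducer (hypothetical helper) reproduces the pattern used by
// roLayer.TarStream: a goroutine writes the assembled stream into an io.Pipe
// and propagates any assembly failure to the reader via CloseWithError.
func pipeFromProducer(produce func(w io.Writer) error) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		if err := produce(pw); err != nil {
			pw.CloseWithError(err) // surfaced by the reader's next Read
		} else {
			pw.Close() // clean EOF
		}
	}()
	return pr
}

func main() {
	rc := pipeFromProducer(func(w io.Writer) error {
		if _, err := io.WriteString(w, "layer data"); err != nil {
			return err
		}
		return errors.New("assembly failed halfway")
	})
	defer rc.Close()
	_, err := ioutil.ReadAll(rc)
	fmt.Println(err) // assembly failed halfway
}
```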
-func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { - var parentCacheID string - for pl := rl.parent; pl != nil; pl = pl.parent { - if pl.chainID == parent { - parentCacheID = pl.cacheID - break - } - } - - if parent != ChainID("") && parentCacheID == "" { - return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) - } - return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) -} - -func (rl *roLayer) ChainID() ChainID { - return rl.chainID -} - -func (rl *roLayer) DiffID() DiffID { - return rl.diffID -} - -func (rl *roLayer) Parent() Layer { - if rl.parent == nil { - return nil - } - return rl.parent -} - -func (rl *roLayer) Size() (size int64, err error) { - if rl.parent != nil { - size, err = rl.parent.Size() - if err != nil { - return - } - } - - return size + rl.size, nil -} - -func (rl *roLayer) DiffSize() (size int64, err error) { - return rl.size, nil -} - -func (rl *roLayer) Metadata() (map[string]string, error) { - return rl.layerStore.driver.GetMetadata(rl.cacheID) -} - -type referencedCacheLayer struct { - *roLayer -} - -func (rl *roLayer) getReference() Layer { - ref := &referencedCacheLayer{ - roLayer: rl, - } - rl.references[ref] = struct{}{} - - return ref -} - -func (rl *roLayer) hasReference(ref Layer) bool { - _, ok := rl.references[ref] - return ok -} - -func (rl *roLayer) hasReferences() bool { - return len(rl.references) > 0 -} - -func (rl *roLayer) deleteReference(ref Layer) { - delete(rl.references, ref) -} - -func (rl *roLayer) depth() int { - if rl.parent == nil { - return 1 - } - return rl.parent.depth() + 1 -} - -func storeLayer(tx MetadataTransaction, layer *roLayer) error { - if err := tx.SetDiffID(layer.diffID); err != nil { - return err - } - if err := tx.SetSize(layer.size); err != nil { - return err - } - if err := tx.SetCacheID(layer.cacheID); err != nil { - return err - } - // Do not store empty descriptors - if layer.descriptor.Digest != "" { - if err := tx.SetDescriptor(layer.descriptor); err != nil { - return err - } - } - if layer.parent != nil { - if err := tx.SetParent(layer.parent.chainID); err != nil { - return err - } - } - - return nil -} - -func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return nil, err - } - return &verifiedReadCloser{ - rc: rc, - dgst: dgst, - verifier: verifier, - }, nil -} - -type verifiedReadCloser struct { - rc io.ReadCloser - dgst digest.Digest - verifier digest.Verifier -} - -func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { - n, err = vrc.rc.Read(p) - if n > 0 { - if n, err := vrc.verifier.Write(p[:n]); err != nil { - return n, err - } - } - if err == io.EOF { - if !vrc.verifier.Verified() { - err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) - } - } - return -} -func (vrc *verifiedReadCloser) Close() error { - return vrc.rc.Close() -} diff --git a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go deleted file mode 100644 index 32bd7182a..000000000 --- a/vendor/github.com/docker/docker/layer/ro_layer_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package layer - -import "github.com/docker/distribution" - -var _ distribution.Describable = &roLayer{} - -func (rl *roLayer) Descriptor() distribution.Descriptor { - return rl.descriptor -} diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go deleted file mode 100644 index 8b3ce7281..000000000 --- a/vendor/github.com/docker/docker/oci/defaults_linux.go +++ /dev/null @@ -1,168 +0,0 @@ -package oci - -import ( - "os" - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -func sPtr(s string) *string { return &s } -func iPtr(i int64) *int64 { return &i } -func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } -func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } - -// DefaultSpec returns default oci spec used by docker. -func DefaultSpec() specs.Spec { - s := specs.Spec{ - Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - } - s.Mounts = []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - { - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"ro", "nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - } - s.Process.Capabilities = []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - } - - s.Linux = &specs.Linux{ - MaskedPaths: []string{ - "/proc/kcore", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/sys/firmware", - }, - ReadonlyPaths: []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - }, - Namespaces: []specs.Namespace{ - {Type: "mount"}, - {Type: "network"}, - {Type: "uts"}, - {Type: "pid"}, - {Type: "ipc"}, - }, - // Devices implicitly contains the following devices: - // null, zero, full, random, urandom, tty, console, and ptmx. - // ptmx is a bind-mount or symlink of the container's ptmx. 
- // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices - Devices: []specs.Device{}, - Resources: &specs.Resources{ - Devices: []specs.DeviceCgroup{ - { - Allow: false, - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(5), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(3), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(9), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(8), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(5), - Minor: iPtr(0), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(5), - Minor: iPtr(1), - Access: sPtr("rwm"), - }, - { - Allow: false, - Type: sPtr("c"), - Major: iPtr(10), - Minor: iPtr(229), - Access: sPtr("rwm"), - }, - }, - }, - } - - return s -} diff --git a/vendor/github.com/docker/docker/oci/defaults_solaris.go b/vendor/github.com/docker/docker/oci/defaults_solaris.go deleted file mode 100644 index 85c8b68e1..000000000 --- a/vendor/github.com/docker/docker/oci/defaults_solaris.go +++ /dev/null @@ -1,20 +0,0 @@ -package oci - -import ( - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// DefaultSpec returns default oci spec used by docker. -func DefaultSpec() specs.Spec { - s := specs.Spec{ - Version: "0.6.0", - Platform: specs.Platform{ - OS: "SunOS", - Arch: runtime.GOARCH, - }, - } - s.Solaris = &specs.Solaris{} - return s -} diff --git a/vendor/github.com/docker/docker/oci/defaults_windows.go b/vendor/github.com/docker/docker/oci/defaults_windows.go deleted file mode 100644 index ab51904ec..000000000 --- a/vendor/github.com/docker/docker/oci/defaults_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package oci - -import ( - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// DefaultSpec returns default spec used by docker. -func DefaultSpec() specs.Spec { - return specs.Spec{ - Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - Windows: &specs.Windows{}, - } -} diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go deleted file mode 100644 index 2840d2586..000000000 --- a/vendor/github.com/docker/docker/oci/devices_linux.go +++ /dev/null @@ -1,86 +0,0 @@ -package oci - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/devices" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Device transforms a libcontainer configs.Device to a specs.Device object. -func Device(d *configs.Device) specs.Device { - return specs.Device{ - Type: string(d.Type), - Path: d.Path, - Major: d.Major, - Minor: d.Minor, - FileMode: fmPtr(int64(d.FileMode)), - UID: u32Ptr(int64(d.Uid)), - GID: u32Ptr(int64(d.Gid)), - } -} - -func deviceCgroup(d *configs.Device) specs.DeviceCgroup { - t := string(d.Type) - return specs.DeviceCgroup{ - Allow: true, - Type: &t, - Major: &d.Major, - Minor: &d.Minor, - Access: &d.Permissions, - } -} - -// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
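The function body that follows resolves a symlinked host path and then walks it to pick up every device node underneath. A stdlib-only sketch of that resolve-then-walk shape, with the device-node specifics elided and a hypothetical helper name:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// listDeviceNodes (hypothetical) resolves a possibly-symlinked host path and,
// when it is a directory, walks it recursively, rewriting each child path
// into the container namespace. This is the resolve-then-walk shape of
// DevicesFromPath, with the actual device-node checks elided.
func listDeviceNodes(pathOnHost, pathInContainer string) ([]string, error) {
	resolved := pathOnHost
	if fi, err := os.Lstat(pathOnHost); err == nil && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		if target, err := filepath.EvalSymlinks(pathOnHost); err == nil {
			resolved = target
		}
	}
	var paths []string
	err := filepath.Walk(resolved, func(p string, fi os.FileInfo, walkErr error) error {
		if walkErr != nil || fi.IsDir() {
			return nil // skip unreadable entries and directories, as the original walk does
		}
		paths = append(paths, strings.Replace(p, resolved, pathInContainer, 1))
		return nil
	})
	return paths, err
}

func main() {
	devs, err := listDeviceNodes("/dev/snd", "/dev/snd")
	fmt.Println(devs, err)
}
```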
-func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { - resolvedPathOnHost := pathOnHost - - // check if it is a symbolic link - if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { - if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { - resolvedPathOnHost = linkedPathOnHost - } - } - - device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) - // if there was no error, return the device - if err == nil { - device.Path = pathInContainer - return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil - } - - // if the device is not a device node - // try to see if it's a directory holding many devices - if err == devices.ErrNotADevice { - - // check if it is a directory - if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { - - // mount the internal devices recursively - filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { - childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) - if e != nil { - // ignore the device - return nil - } - - // add the device to userSpecified devices - childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) - devs = append(devs, Device(childDevice)) - devPermissions = append(devPermissions, deviceCgroup(childDevice)) - - return nil - }) - } - } - - if len(devs) > 0 { - return devs, devPermissions, nil - } - - return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) -} diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go deleted file mode 100644 index 6252cab53..000000000 --- a/vendor/github.com/docker/docker/oci/devices_unsupported.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !linux - -package oci - -import ( - "errors" - - "github.com/opencontainers/runc/libcontainer/configs" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Device transforms a libcontainer configs.Device to a specs.Device object. -// Not implemented -func Device(d *configs.Device) specs.Device { return specs.Device{} } - -// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. -// Not implemented -func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { - return nil, nil, errors.New("oci/devices: unsupported platform") -} diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go deleted file mode 100644 index 490248249..000000000 --- a/vendor/github.com/docker/docker/oci/namespaces.go +++ /dev/null @@ -1,16 +0,0 @@ -package oci - -import specs "github.com/opencontainers/runtime-spec/specs-go" - -// RemoveNamespace removes the `nsType` namespace from OCI spec `s` -func RemoveNamespace(s *specs.Spec, nsType specs.NamespaceType) { - idx := -1 - for i, n := range s.Linux.Namespaces { - if n.Type == nsType { - idx = i - } - } - if idx >= 0 { - s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) 
- } -} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go deleted file mode 100644 index 266df1e53..000000000 --- a/vendor/github.com/docker/docker/opts/hosts.go +++ /dev/null @@ -1,151 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "net/url" - "strconv" - "strings" -) - -var ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) - // DefaultNamedPipe defines the default named pipe used by docker on Windows - DefaultNamedPipe = `//./pipe/docker_engine` -) - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost - if host != "" { - _, err := parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultToTLS bool, val string) (string, error) { - host := strings.TrimSpace(val) - if host == "" { - if defaultToTLS { - host = DefaultTLSHost - } else { - host = DefaultHost - } - } else { - var err error - host, err = parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - return host, nil -} - -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. -func parseDockerDaemonHost(addr string) (string, error) { - addrParts := strings.SplitN(addr, "://", 2) - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. 
-func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. - if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // try port addition once - host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) - } - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go deleted file mode 100644 index 611407a9d..000000000 --- a/vendor/github.com/docker/docker/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index 7c239e00f..000000000 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go deleted file mode 100644 index fb03b5011..000000000 --- a/vendor/github.com/docker/docker/opts/ip.go +++ /dev/null @@ -1,47 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. 
-func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} - -// Type returns the type of the option -func (o *IPOpt) Type() string { - return "ip" -} diff --git a/vendor/github.com/docker/docker/opts/mount.go b/vendor/github.com/docker/docker/opts/mount.go deleted file mode 100644 index ce6383ddc..000000000 --- a/vendor/github.com/docker/docker/opts/mount.go +++ /dev/null @@ -1,171 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// MountOpt is a Value type for parsing mounts -type MountOpt struct { - values []mounttypes.Mount -} - -// Set a new mount value -func (m *MountOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - mount := mounttypes.Mount{} - - volumeOptions := func() *mounttypes.VolumeOptions { - if mount.VolumeOptions == nil { - mount.VolumeOptions = &mounttypes.VolumeOptions{ - Labels: make(map[string]string), - } - } - if mount.VolumeOptions.DriverConfig == nil { - mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} - } - return mount.VolumeOptions - } - - bindOptions := func() *mounttypes.BindOptions { - if mount.BindOptions == nil { - mount.BindOptions = new(mounttypes.BindOptions) - } - return mount.BindOptions - } - - tmpfsOptions := func() *mounttypes.TmpfsOptions { - if mount.TmpfsOptions == nil { - mount.TmpfsOptions = new(mounttypes.TmpfsOptions) - } - return mount.TmpfsOptions - } - - setValueOnMap := func(target map[string]string, value string) { - parts := strings.SplitN(value, "=", 2) - if len(parts) == 1 { - target[value] = "" - } else { - target[parts[0]] = parts[1] - } - } - - mount.Type = mounttypes.TypeVolume // default to volume mounts - // Set writable as the default - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - mount.ReadOnly = true - continue - case "volume-nocopy": - volumeOptions().NoCopy = true - continue - } - } - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - mount.Type = mounttypes.Type(strings.ToLower(value)) - case "source", "src": - mount.Source = value - case "target", "dst", "destination": - mount.Target = value - case "readonly", "ro": - mount.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - case "bind-propagation": - bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) - case "volume-nocopy": - volumeOptions().NoCopy, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for populate: %s", value) - } - case "volume-label": - setValueOnMap(volumeOptions().Labels, 
value) - case "volume-driver": - volumeOptions().DriverConfig.Name = value - case "volume-opt": - if volumeOptions().DriverConfig.Options == nil { - volumeOptions().DriverConfig.Options = make(map[string]string) - } - setValueOnMap(volumeOptions().DriverConfig.Options, value) - case "tmpfs-size": - sizeBytes, err := units.RAMInBytes(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().SizeBytes = sizeBytes - case "tmpfs-mode": - ui64, err := strconv.ParseUint(value, 8, 32) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().Mode = os.FileMode(ui64) - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if mount.Type == "" { - return fmt.Errorf("type is required") - } - - if mount.Target == "" { - return fmt.Errorf("target is required") - } - - if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { - return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) - } - if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) - } - if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { - return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []mounttypes.Mount { - return m.values -} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go deleted file mode 100644 index ae851537e..000000000 --- a/vendor/github.com/docker/docker/opts/opts.go +++ /dev/null @@ -1,360 +0,0 @@ -package opts - -import ( - "fmt" - "math/big" - "net" - "regexp" - "strings" - - "github.com/docker/docker/api/types/filters" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) -) - -// ListOpts holds a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string((*opts.values))) -} - -// Set validates if needed the input value and adds it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete removes the specified element from the slice. 
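MountOpt.Set above, like PortOpt.Set and SecretOpt.Set later in this diff, treats the flag value as a single CSV record whose fields are key=value pairs, with bare keys acting as boolean shorthands. A minimal sketch of that shared parsing shape (hypothetical helper name):

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// parseCSVFlag (hypothetical) shows the shared parsing shape: read the flag
// value as a single CSV record, then split each field on the first '=' and
// treat bare keys as boolean shorthands such as "readonly".
func parseCSVFlag(value string) (map[string]string, error) {
	fields, err := csv.NewReader(strings.NewReader(value)).Read()
	if err != nil {
		return nil, err
	}
	opts := make(map[string]string)
	for _, field := range fields {
		parts := strings.SplitN(field, "=", 2)
		key := strings.ToLower(parts[0])
		if len(parts) == 1 {
			opts[key] = "true" // bare key, e.g. "readonly"
			continue
		}
		opts[key] = parts[1]
	}
	return opts, nil
}

func main() {
	m, err := parseCSVFlag("type=volume,source=data,target=/data,readonly")
	fmt.Println(m, err)
}
```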
-func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of the slice. -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// GetAllOrEmpty returns the values of the slice -// or an empty slice when there are no values. -func (opts *ListOpts) GetAllOrEmpty() []string { - v := *opts.values - if v == nil { - return make([]string, 0) - } - return v -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the number of elements in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -// Type returns a string name for this Option type -func (opts *ListOpts) Type() string { - return "list" -} - -// NamedOption is an interface that list and map options -// with names implement. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep a reference to the assigned -// field name in the internal configuration struct. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *NamedListOpts) Name() string { - return o.name -} - -// MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates the input value if needed and adds it to the -// internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) -} - -// Type returns a string name for this Option type -func (opts *MapOpts) Type() string { - return "map" -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep a reference to the assigned -// field name in the internal configuration struct.
-type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. -func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -func (o *NamedMapOpts) Name() string { - return o.name -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateIPAddress validates an IP address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateDNSSearch validates a domain for the resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. -// Labels are in the form of key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateSysctl validates a sysctl and returns it.
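ValidatorFctType is the extension point for all of these option types: Set runs the validator first and stores the value it returns, so a validator can normalize input as well as reject it. A trimmed sketch with stand-in types (validator and listOpt are hypothetical):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// validator matches the ValidatorFctType shape above; listOpt is a trimmed
// stand-in for ListOpts showing how Set runs the validator before storing
// the (possibly normalized) value.
type validator func(string) (string, error)

func validateIP(val string) (string, error) {
	if ip := net.ParseIP(strings.TrimSpace(val)); ip != nil {
		return ip.String(), nil // normalized form
	}
	return "", fmt.Errorf("%s is not an ip address", val)
}

type listOpt struct {
	values   []string
	validate validator
}

func (l *listOpt) Set(value string) error {
	if l.validate != nil {
		v, err := l.validate(value)
		if err != nil {
			return err
		}
		value = v
	}
	l.values = append(l.values, value)
	return nil
}

func main() {
	opt := &listOpt{validate: validateIP}
	fmt.Println(opt.Set("10.0.0.1"), opt.Set("not-an-ip"), opt.values)
}
```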
-func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, - } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", - } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) - } - if validSysctlMap[arr[0]] { - return val, nil - } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } - } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) -} - -// FilterOpt is a flag type for validating filters -type FilterOpt struct { - filter filters.Args -} - -// NewFilterOpt returns a new FilterOpt -func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} -} - -func (o *FilterOpt) String() string { - repr, err := filters.ToParam(o.filter) - if err != nil { - return "invalid filters" - } - return repr -} - -// Set sets the value of the opt by parsing the command line value -func (o *FilterOpt) Set(value string) error { - var err error - o.filter, err = filters.ParseFlag(value, o.filter) - return err -} - -// Type returns the option type -func (o *FilterOpt) Type() string { - return "filter" -} - -// Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { - return o.filter -} - -// NanoCPUs is a type for fixed point fractional number. -type NanoCPUs int64 - -// String returns the string format of the number -func (c *NanoCPUs) String() string { - return big.NewRat(c.Value(), 1e9).FloatString(3) -} - -// Set sets the value of the NanoCPU by passing a string -func (c *NanoCPUs) Set(value string) error { - cpus, err := ParseCPUs(value) - *c = NanoCPUs(cpus) - return err -} - -// Type returns the type -func (c *NanoCPUs) Type() string { - return "decimal" -} - -// Value returns the value in int64 -func (c *NanoCPUs) Value() int64 { - return int64(*c) -} - -// ParseCPUs takes a string ratio and returns an integer value of nano cpus -func ParseCPUs(value string) (int64, error) { - cpu, ok := new(big.Rat).SetString(value) - if !ok { - return 0, fmt.Errorf("failed to parse %v as a rational number", value) - } - nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) - if !nano.IsInt() { - return 0, fmt.Errorf("value is too precise") - } - return nano.Num().Int64(), nil -} diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go deleted file mode 100644 index f1ce844a8..000000000 --- a/vendor/github.com/docker/docker/opts/opts_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows - -package opts - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 -const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go deleted file mode 100644 index ebe40c969..000000000 --- a/vendor/github.com/docker/docker/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts - -// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. -// @jhowardmsft, @swernli. -// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP5. 
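ParseCPUs above is worth a runnable restatement: math/big.Rat keeps the arithmetic exact, so over-precise inputs are rejected outright instead of being silently rounded. The sketch mirrors the vendored function and adds a usage example:

```go
package main

import (
	"fmt"
	"math/big"
)

// parseCPUs restates ParseCPUs from this file: parse the flag as an exact
// rational, scale by 1e9, and reject anything finer than one nano-CPU.
func parseCPUs(value string) (int64, error) {
	cpu, ok := new(big.Rat).SetString(value)
	if !ok {
		return 0, fmt.Errorf("failed to parse %v as a rational number", value)
	}
	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
	if !nano.IsInt() {
		return 0, fmt.Errorf("value is too precise")
	}
	return nano.Num().Int64(), nil
}

func main() {
	fmt.Println(parseCPUs("1.5"))          // 1500000000 <nil>
	fmt.Println(parseCPUs("0.0000000001")) // 0 value is too precise
}
```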
-// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." -// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... -// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/port.go b/vendor/github.com/docker/docker/opts/port.go deleted file mode 100644 index 020a5d1e1..000000000 --- a/vendor/github.com/docker/docker/opts/port.go +++ /dev/null @@ -1,146 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -const ( - portOptTargetPort = "target" - portOptPublishedPort = "published" - portOptProtocol = "protocol" - portOptMode = "mode" -) - -// PortOpt represents a port config in swarm mode. -type PortOpt struct { - ports []swarm.PortConfig -} - -// Set a new port value -func (p *PortOpt) Set(value string) error { - longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) - if err != nil { - return err - } - if longSyntax { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - pConfig := swarm.PortConfig{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid field %s", field) - } - - key := strings.ToLower(parts[0]) - value := strings.ToLower(parts[1]) - - switch key { - case portOptProtocol: - if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { - return fmt.Errorf("invalid protocol value %s", value) - } - - pConfig.Protocol = swarm.PortConfigProtocol(value) - case portOptMode: - if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { - return fmt.Errorf("invalid publish mode value %s", value) - } - - pConfig.PublishMode = swarm.PortConfigPublishMode(value) - case portOptTargetPort: - tPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.TargetPort = uint32(tPort) - case portOptPublishedPort: - pPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.PublishedPort = uint32(pPort) - default: - return fmt.Errorf("invalid field key %s", key) - } - } - - if pConfig.TargetPort == 0 { - return fmt.Errorf("missing mandatory field %q", portOptTargetPort) - } - - if pConfig.PublishMode == "" { - pConfig.PublishMode = swarm.PortConfigPublishModeIngress - } - - if pConfig.Protocol == "" { - pConfig.Protocol = swarm.PortConfigProtocolTCP - } - - p.ports = append(p.ports, pConfig) - } else { - // short syntax - portConfigs := []swarm.PortConfig{} - // We can ignore errors because the format was already validated by ValidatePort - ports, portBindings, _ := nat.ParsePortSpecs([]string{value}) - - for port := range ports { - portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...) - } - p.ports = append(p.ports, portConfigs...) 
- } - return nil -} - -// Type returns the type of this option -func (p *PortOpt) Type() string { - return "port" -} - -// String returns a string repr of this option -func (p *PortOpt) String() string { - ports := []string{} - for _, port := range p.ports { - repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) - ports = append(ports, repr) - } - return strings.Join(ports, ", ") -} - -// Value returns the ports -func (p *PortOpt) Value() []swarm.PortConfig { - return p.ports -} - -// ConvertPortToPortConfig converts ports to the swarm type -func ConvertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) []swarm.PortConfig { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16) - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? - Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - PublishMode: swarm.PortConfigPublishModeIngress, - }) - } - return ports -} diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go deleted file mode 100644 index fb1e5374b..000000000 --- a/vendor/github.com/docker/docker/opts/quotedstring.go +++ /dev/null @@ -1,37 +0,0 @@ -package opts - -// QuotedString is a string that may have extra quotes around the value. The -// quotes are stripped from the value. -type QuotedString struct { - value *string -} - -// Set sets a new value -func (s *QuotedString) Set(val string) error { - *s.value = trimQuotes(val) - return nil -} - -// Type returns the type of the value -func (s *QuotedString) Type() string { - return "string" -} - -func (s *QuotedString) String() string { - return string(*s.value) -} - -func trimQuotes(value string) string { - lastIndex := len(value) - 1 - for _, char := range []byte{'\'', '"'} { - if value[0] == char && value[lastIndex] == char { - return value[1:lastIndex] - } - } - return value -} - -// NewQuotedString returns a new quoted string option -func NewQuotedString(value *string) *QuotedString { - return &QuotedString{value: value} -} diff --git a/vendor/github.com/docker/docker/opts/secret.go b/vendor/github.com/docker/docker/opts/secret.go deleted file mode 100644 index b77a33f68..000000000 --- a/vendor/github.com/docker/docker/opts/secret.go +++ /dev/null @@ -1,107 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/docker/docker/api/types" -) - -// SecretOpt is a Value type for parsing secrets -type SecretOpt struct { - values []*types.SecretRequestOption -} - -// Set a new secret value -func (o *SecretOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - options := &types.SecretRequestOption{ - Source: "", - Target: "", - UID: "0", - GID: "0", - Mode: 0444, - } - - // support a simple syntax of --secret foo - if len(fields) == 1 { - options.Source = fields[0] - options.Target = fields[0] - o.values = append(o.values, options) - return nil - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source": - options.Source = value - case 
"target": - tDir, _ := filepath.Split(value) - if tDir != "" { - return fmt.Errorf("target must not be a path") - } - options.Target = value - case "uid": - options.UID = value - case "gid": - options.GID = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - options.Mode = os.FileMode(m) - default: - if len(fields) == 1 && value == "" { - - } else { - return fmt.Errorf("invalid field in secret request: %s", key) - } - } - } - - if options.Source == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, options) - return nil -} - -// Type returns the type of this option -func (o *SecretOpt) Type() string { - return "secret" -} - -// String returns a string repr of this option -func (o *SecretOpt) String() string { - secrets := []string{} - for _, secret := range o.values { - repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target) - secrets = append(secrets, repr) - } - return strings.Join(secrets, ", ") -} - -// Value returns the secret requests -func (o *SecretOpt) Value() []*types.SecretRequestOption { - return o.values -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index 3261c4f49..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1175 +0,0 @@ -package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/system" -) - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } - - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. 
Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error -) - -var ( - // ErrNotImplemented is the error message of function not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) - -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. - OverlayWhiteoutFormat -) - -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Compare(m, source[:len(m)]) == 0 { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.Command(args[0], args[1:]...), archive) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. 
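DetectCompression above identifies the codec purely from magic bytes at the start of the stream. A stdlib-only sketch of the same table-driven check (hypothetical name):

```go
package main

import (
	"bytes"
	"fmt"
)

// detect (hypothetical) shows the DetectCompression approach: compare the
// stream's leading bytes against known magic prefixes and fall back to
// "uncompressed" when none match.
func detect(header []byte) string {
	magics := map[string][]byte{
		"bzip2": {0x42, 0x5A, 0x68},
		"gzip":  {0x1F, 0x8B, 0x08},
		"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	}
	for name, m := range magics {
		if len(header) >= len(m) && bytes.Equal(header[:len(m)], m) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	fmt.Println(detect([]byte{0x1F, 0x8B, 0x08, 0x00})) // gzip
	fmt.Println(detect([]byte("plain tar header")))     // uncompressed
}
```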
- // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, chdone, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { - <-chdone - return readBufWrapper.Close() - }), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with the specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -// canonicalTarName provides a platform-independent and consistent posix-style -// path for files and directories to be archived regardless of the platform.
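xzDecompress above shells out because there is no xz decoder in the standard library; the cmdStream helper it calls is not part of this hunk. A minimal sketch of what such a helper could look like, using an io.Pipe so Wait never races the reader, with cat standing in for xz so the demo runs without xz installed (Unix assumption):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os/exec"
	"strings"
)

// cmdStream sketches the helper xzDecompress relies on (its body is not part
// of this hunk): stream `in` through an external command and hand back its
// stdout, with a channel that closes once the process has been reaped.
func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, <-chan struct{}, error) {
	cmd.Stdin = in
	pr, pw := io.Pipe()
	cmd.Stdout = pw
	done := make(chan struct{})
	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}
	go func() {
		// Propagate the exit status to the reader, then signal completion.
		if err := cmd.Wait(); err != nil {
			pw.CloseWithError(err)
		} else {
			pw.Close()
		}
		close(done)
	}()
	return pr, done, nil
}

func main() {
	// "cat" stands in for "xz -d -c -q" so the demo needs no xz binary.
	rc, done, err := cmdStream(exec.Command("cat"), strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	out, _ := ioutil.ReadAll(rc)
	<-done
	fmt.Println(string(out)) // hello
}
```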
-func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - // a link should have a name that it links to, - // and that linked name should appear first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - - // handle re-mapping container ID mappings back to host ID mappings before - // writing tar headers/files. We skip whiteout files because they were written - // by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { - uid, gid, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) - if err != nil { - return err - } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - hdr.Gid = xGID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout while also - // returning a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open.
- file, err := system.OpenSequential(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error { - // hdr.Mode is in linux format, which we can use for syscalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - return nil - } - // Handle this in an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this in an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*.
However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. - errors = append(errors, err.Error()) - continue - } - return err - } - - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), - } - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. 
So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) - if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and it's a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // If it's not a dir we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !exceptions { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range patterns { - if pat[0] != '!' { - continue - } - pat = pat[1:] + string(filepath.Separator) - if strings.HasPrefix(pat, dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -}
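TarWithOptions is the workhorse behind Tar: it walks srcPath, applies the include/exclude logic above, and streams the result through the chosen compressor via a pipe. As an illustration only (the paths are hypothetical), tarring a directory with an exclude pattern and draining the pipe into a file:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Stream /tmp/app as an uncompressed tar, skipping log files.
	// Per the matching rules above, an exact IncludeFiles entry would
	// still win over a matching exclude pattern.
	rc, err := archive.TarWithOptions("/tmp/app", &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: []string{"*.log"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("/tmp/app.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The archive is produced by a goroutine writing into a pipe, so
	// errors during the walk surface here as read errors.
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}
```

- -// Unpack unpacks the decompressedArchive to dest with options.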
-func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". On Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exists we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." { - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr)
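The remapping step just below translates container-relative uid/gid values back to host values before lchown runs. The real implementation is idtools.ToHost; the following standalone sketch (the idMap type and toHost helper are hypothetical stand-ins, not the vendored API) shows the contiguous-range arithmetic involved:

```go
package main

import "fmt"

// idMap mirrors the shape of an ID-mapping entry: a contiguous range of
// Size IDs starting at ContainerID maps onto the range starting at HostID.
type idMap struct{ ContainerID, HostID, Size int }

// toHost resolves a container ID to a host ID, much like Unpack does via
// idtools.ToHost before calling lchown.
func toHost(containerID int, maps []idMap) (int, error) {
	if maps == nil {
		return containerID, nil // no user-namespace remapping configured
	}
	for _, m := range maps {
		if containerID >= m.ContainerID && containerID < m.ContainerID+m.Size {
			return m.HostID + (containerID - m.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", containerID)
}

func main() {
	maps := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	uid, _ := toHost(1000, maps)
	fmt.Println(uid) // 101000: container uid 1000 is owned by host uid 101000
}
```

- - // if the options contain uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container.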
- if hdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if hdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { - return err - } - - // Directory mtimes must be handled at the end to prevent further - // file creation inside them from modifying the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - return archiver.Untar(archive, dst, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -}
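TarUntar and its long form are easiest to see side by side. A hedged sketch with illustrative paths (not from the original source):

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// One-shot copy: tar /var/lib/app (uncompressed) and unpack it at
	// /var/lib/app-copy, using the default archiver (no ID remapping).
	if err := archive.TarUntar("/var/lib/app", "/var/lib/app-copy"); err != nil {
		log.Fatal(err)
	}

	// Equivalent long form: Tar produces the stream, and Untar sniffs the
	// compression (via DecompressStream) before delegating to Unpack.
	rc, err := archive.Tar("/var/lib/app", archive.Gzip)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	if err := archive.Untar(rc, "/var/lib/app-copy2", nil); err != nil {
		log.Fatal(err)
	}
}
```

- -// UntarPath untars a file from path to a destination; src is the source tar file path.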
-func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. 
- if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { - return err - } - - r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } - - // only perform mapping if the file being copied isn't already owned by the - // uid or gid of the remapped root in the container - if remappedRootUID != hdr.Uid { - xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if remappedRootGID != hdr.Gid { - xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating-system-specific manner depending on -// where the daemon is running. If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { - chdone := make(chan struct{}) - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(chdone) - }() - - return pipeR, chdone, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -}
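NewTempArchive spools a stream to disk so it can be re-read with a known size, and the TempArchive type defined next deletes its backing file once fully read. An illustrative sketch of that single-use contract (assuming the vendored import path; we read exactly Size bytes so as not to touch the file after it self-closes):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Spool an arbitrary stream into a temp file ("" = default temp dir).
	ta, err := archive.NewTempArchive(strings.NewReader("payload"), "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("size:", ta.Size) // pre-computed from Stat()

	// Once the last byte is returned, TempArchive closes itself and
	// removes the backing file, so it can only be consumed once.
	buf := make([]byte, ta.Size)
	if _, err := io.ReadFull(ta, buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(buf))
}
```

-// TempArchive is a temporary archive.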
The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go deleted file mode 100644 index 6b2a31ff1..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ /dev/null @@ -1,95 +0,0 @@ -package archive - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/system" -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return -} - -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { - return false, err - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the 
file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go deleted file mode 100644 index 54acbf285..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package archive - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index 7083f2fa5..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - inode = uint64(s.Ino) - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) - } - - return -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") - } - return int(s.Uid), int(s.Gid), nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index 5c3a1be34..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build windows - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. 
Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace; however, we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - // no notion of file ownership mapping yet on Windows - return 0, 0, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index c07d55cbd..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,446 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -}
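ChangeType's String method supplies the one-letter codes that docker diff prints. Purely as an illustration (not from the deleted file), and assuming the vendored import path, here a few changes are rendered with the String methods defined here:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	changes := []archive.Change{
		{Path: "/etc/passwd", Kind: archive.ChangeModify},
		{Path: "/var/log/app.log", Kind: archive.ChangeAdd},
		{Path: "/tmp/scratch", Kind: archive.ChangeDelete},
	}
	for _, c := range changes {
		// Change.String combines the one-letter kind with the path,
		// e.g. "C /etc/passwd", "A /var/log/app.log", "D /tmp/scratch".
		fmt.Println(c.String())
	}
}
```

- -// Change represents a change; it wraps the change type and path. -// It describes changes of the files in the path with respect to the -// parent layers. The change could be modify, add, or delete. -// This is used for layer diff.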
-type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar and the Go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files. We handle this by comparing for exact times, *or* the same -// second count with either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified.
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild, _ := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. 
The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes walks the FileInfo tree and returns the changes relative to oldInfo. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -}
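ChangesDirs, ChangesSize, and ExportChanges form the layer-diff pipeline: compute the change set, optionally size it, then serialize it as a tar layer in which deletions become `.wh.` whiteout entries. A hedged sketch of that pipeline, with illustrative directories and nil ID maps (no user-namespace remapping):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Diff the upper directory against the base (old) one.
	changes, err := archive.ChangesDirs("/data/new", "/data/old")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("diff is %d bytes across %d changes",
		archive.ChangesSize("/data/new", changes), len(changes))

	// Serialize the diff as a tar layer; deletions are emitted as
	// AUFS-style ".wh.<name>" whiteout headers.
	layer, err := archive.ExportChanges("/data/new", changes, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	f, err := os.Create("/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, layer); err != nil {
		log.Fatal(err)
	}
}
```

- -// ExportChanges produces an Archive from the provided changes, relative to dir.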
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index fc5a9dfdb..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,312 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/docker/docker/pkg/system" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. 
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of syscall.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} - -// OverlayChanges walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func OverlayChanges(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, overlayDeletedFile, nil) -} - -func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { - if fi.Mode()&os.ModeCharDevice != 0 { - s := fi.Sys().(*syscall.Stat_t) - if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { - return path, nil - } - } - if fi.Mode()&os.ModeDir != 0 { - opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") - if err != nil { - return "", err - } - if len(opaque) == 1 && opaque[0] == 'y' { - return path, nil - } - } - - return "", nil - -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index da70ed37c..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index 3778b732c..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows - -package archive - -import ( - "os" - "syscall" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return uint64(fi.Sys().(*syscall.Stat_t).Ino) -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index af94243fc..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index 0614c67ce..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,458 +0,0 @@ -package archive - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/system" -) - -// Errors used or returned by this file. 
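The statDifferent variants deleted above share one rule worth noting: size and mtime are ignored for directories, since directory size is filesystem bookkeeping rather than content. A rough portable approximation using only os.FileInfo (a sketch; the vendored versions use the richer system.StatT, which also carries UID/GID/rdev):

package main

import (
	"fmt"
	"os"
)

// changed mirrors the spirit of statDifferent: mode changes always count,
// but size and modification time are only consulted for non-directories.
func changed(oldFi, newFi os.FileInfo) bool {
	if oldFi.Mode() != newFi.Mode() {
		return true
	}
	if oldFi.IsDir() {
		return false
	}
	return oldFi.Size() != newFi.Size() || !oldFi.ModTime().Equal(newFi.ModTime())
}

func main() {
	fi, _ := os.Lstat(os.Args[0])
	fmt.Println(changed(fi, fi)) // false: identical stat
}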
-var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". 
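These trailing-"/." helpers exist because filepath.Clean erases exactly the suffix that gives cp(1)-style copies their meaning: "src/." means the contents of src, while "src" means the directory itself. The stdlib behavior they compensate for can be checked directly:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	fmt.Println(filepath.Clean("src/.")) // "src" -- the "/." is dropped
	fmt.Println(filepath.Clean("src/"))  // "src" -- the "/" is dropped
	fmt.Println(filepath.Base("src/."))  // "."   -- what specifiesCurrentDir keys on
}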
-func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - - filter := []string{sourceBase} - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - - return TarWithOptions(sourceDir, &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - }) -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. -func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. 
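Right below, the destination-resolution loop interprets a relative link target relative to the link's own directory before re-stat'ing the path; it deliberately avoids filepath.EvalSymlinks because a broken final link is acceptable for a copy destination. A standalone illustration of that join step (this sketch uses filepath.IsAbs where the vendored code uses the Windows-aware system.IsAbs):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "linkdemo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	link := filepath.Join(dir, "current")
	if err := os.Symlink("releases/v2", link); err != nil { // relative target
		log.Fatal(err)
	}

	target, err := os.Readlink(link)
	if err != nil {
		log.Fatal(err)
	}
	if !filepath.IsAbs(target) {
		// Same join the copy code performs before re-stat'ing the path.
		target = filepath.Join(filepath.Dir(link), target)
	}
	fmt.Println(target) // <tmpdir>/releases/v2 -- may not exist, which is fine here
}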
- dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Lstat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. 
- return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. - dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides real path need to be copied with parameters such as -// whether to follow symbol link or not, if followLink is true, resolvedPath will return -// link target of any symbol link file, else it will only resolve symlink of directory -// but return symbol link file itself without resolving. 
-func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index e305b5e4a..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45c..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index 9e1a58c49..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,279 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. 
-func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - if options == nil { - options = &TarOptions{} - } - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. 
- if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. 
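The idtools.ToHost calls that follow translate container-side IDs into the host's subordinate range. The core is interval arithmetic over /etc/subuid-style mappings; a hedged sketch of just that arithmetic (idMap and toHost are illustrative; the real idtools handles multiple ranges and richer error reporting):

package main

import "fmt"

// idMap mirrors the shape of an /etc/subuid entry: container IDs in
// [containerID, containerID+size) map onto host IDs starting at hostID.
type idMap struct {
	containerID int
	hostID      int
	size        int
}

func toHost(id int, maps []idMap) (int, error) {
	if len(maps) == 0 {
		return id, nil // no remapping configured: pass through
	}
	for _, m := range maps {
		if id >= m.containerID && id < m.containerID+m.size {
			return m.hostID + (id - m.containerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d has no host mapping", id)
}

func main() {
	maps := []idMap{{containerID: 0, hostID: 100000, size: 65536}}
	uid, _ := toHost(1000, maps)
	fmt.Println(uid) // 101000
}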
- if srcHdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) - if err != nil { - return 0, err - } - srcHdr.Uid = xUID - } - if srcHdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) - if err != nil { - return 0, err - } - srcHdr.Gid = xGID - } - if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go deleted file mode 100644 index cedd46a40..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/archive" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)")
-		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
-		flag.PrintDefaults()
-	}
-	flag.Parse()
-	log.Out = os.Stderr
-	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
-		logrus.SetLevel(logrus.DebugLevel)
-	}
-	var newDir, oldDir string
-
-	if len(*flNewDir) == 0 {
-		var err error
-		newDir, err = ioutil.TempDir("", "docker-test-newDir")
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer os.RemoveAll(newDir)
-		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
-			log.Fatal(err)
-		}
-	} else {
-		newDir = *flNewDir
-	}
-
-	if len(*flOldDir) == 0 {
-		// Assign to the outer oldDir; ":=" here would shadow it and leave
-		// the outer variable empty for ChangesDirs below.
-		var err error
-		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer os.RemoveAll(oldDir)
-	} else {
-		oldDir = *flOldDir
-	}
-
-	changes, err := archive.ChangesDirs(newDir, oldDir)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	a, err := archive.ExportChanges(newDir, changes)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer a.Close()
-
-	i, err := io.Copy(os.Stdout, a)
-	if err != nil && err != io.EOF {
-		log.Fatal(err)
-	}
-	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
-	fileData := []byte("fooo")
-	for n := 0; n < numberOfFiles; n++ {
-		fileName := fmt.Sprintf("file-%d", n)
-		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
-			return 0, err
-		}
-		if makeLinks {
-			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
-				return 0, err
-			}
-		}
-	}
-	totalSize := numberOfFiles * len(fileData)
-	return totalSize, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
deleted file mode 100644
index 3448569b1..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package archive
-
-import (
-	"syscall"
-	"time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
-	if time.IsZero() {
-		// Return UTIME_OMIT special value
-		ts.Sec = 0
-		ts.Nsec = ((1 << 30) - 2)
-		return
-	}
-	return syscall.NsecToTimespec(time.UnixNano())
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
deleted file mode 100644
index e85aac054..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !linux
-
-package archive
-
-import (
-	"syscall"
-	"time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
-	nsec := int64(0)
-	if !time.IsZero() {
-		nsec = time.UnixNano()
-	}
-	return syscall.NsecToTimespec(nsec)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
deleted file mode 100644
index d20478a10..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package archive
-
-// Whiteouts are files with a special meaning for the layered filesystem.
-// Docker uses AUFS whiteout files inside exported archives. In other
-// filesystems these files are generated/handled on tar creation/extraction.
-
-// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
-// filename this means that file has been removed from the base layer.
-const WhiteoutPrefix = ".wh."
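Concretely, deleting /etc/hosts in an upper layer is recorded as an entry named etc/.wh.hosts in that layer's tar. A small round-trip of the naming rule (the constant value is copied from above; whiteoutFor and deletedPath are illustrative helpers):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const WhiteoutPrefix = ".wh."

// whiteoutFor returns the whiteout entry name that marks path as deleted.
func whiteoutFor(path string) string {
	dir, base := filepath.Split(path)
	return filepath.Join(dir, WhiteoutPrefix+base)
}

// deletedPath inverts whiteoutFor, reporting whether the entry is a whiteout.
func deletedPath(whiteout string) (string, bool) {
	dir, base := filepath.Split(whiteout)
	if !strings.HasPrefix(base, WhiteoutPrefix) {
		return "", false
	}
	return filepath.Join(dir, strings.TrimPrefix(base, WhiteoutPrefix)), true
}

func main() {
	fmt.Println(whiteoutFor("etc/hosts"))     // etc/.wh.hosts
	fmt.Println(deletedPath("etc/.wh.hosts")) // etc/hosts true
}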
- -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index b39d12c87..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go deleted file mode 100644 index a7814f5b9..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ /dev/null @@ -1,97 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" -) - -var chrootArchiver = &archive.Archiver{Untar: Untar} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
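Generate, defined above, packs alternating path/content arguments into an in-memory tar, padding a dangling final path with empty content. A putative use, reading the result back with the stdlib (assumes this vendored archive package is importable at the pinned version):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	r, err := archive.Generate("foo.txt", "hello world", "empty")
	if err != nil {
		log.Fatal(err)
	}
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
		// foo.txt (11 bytes)
		// empty (0 bytes)   <- the dangling name gets empty content
	}
}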
-func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - - dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { - return err - } - } - - r := ioutil.NopCloser(tarArchive) - if decompress { - decompressedArchive, err := archive.DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return invokeUnpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return chrootArchiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return chrootArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/' ('\' on Windows), the final -// destination path will be `dst/base(src)` or `dst\base(src)` -func CopyFileWithTar(src, dst string) (err error) { - return chrootArchiver.CopyFileWithTar(src, dst) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return chrootArchiver.UntarPath(src, dst) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go deleted file mode 100644 index f2325abd7..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -// untar is the entry-point for docker-untar on re-exec. This is not used on -// Windows as it does not support chroot, hence no point sandboxing through -// chroot and rexec. 
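The untar function below is never called directly; it is registered under the name docker-untar and runs when the daemon re-executes its own binary with that argv[0]. A skeleton of the pattern (reexec.Register and reexec.Command appear elsewhere in this diff; reexec.Init is assumed from the same vendored package):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Child entrypoint: selected by argv[0] when the binary re-executes itself.
	reexec.Register("my-worker", func() {
		fmt.Println("running sandboxed work in the child")
	})
}

func main() {
	if reexec.Init() {
		// We are the re-exec'd child; the registered function already ran.
		return
	}
	cmd := reexec.Command("my-worker") // fork/exec of our own binary
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}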
-func untar() { - runtime.LockOSThread() - flag.Parse() - - var options *archive.TarOptions - - //read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { - fatal(err) - } - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := archive.Unpack(os.Stdin, "/", options); err != nil { - fatal(err) - } - // fully consume stdin in case it is zero padded - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { - - // We can't pass a potentially large exclude list directly via cmd line - // because we easily overrun the kernel's max argument/environment size - // when the full image list is passed (e.g. when this is used by - // `docker load`). We will marshall the options via a pipe to the - // child - r, w, err := os.Pipe() - if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) - } - - cmd := reexec.Command("docker-untar", dest) - cmd.Stdin = decompressedArchive - - cmd.ExtraFiles = append(cmd.ExtraFiles, r) - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - - if err := cmd.Start(); err != nil { - return fmt.Errorf("Untar error on re-exec cmd: %v", err) - } - //write the options to the pipe for the untar exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - return fmt.Errorf("Untar json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, - // we need to exhaust `xz`'s output, otherwise the `xz` side will be - // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) - - return fmt.Errorf("Error processing tar file(%v): %s", err, output) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go deleted file mode 100644 index 0a500ed5c..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// chroot is not supported by Windows -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions) error { - // Windows is different to Linux here because Windows does not support - // chroot. Hence there is no point sandboxing a chrooted process to - // do the unpack. We call inline instead within the daemon process. - return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go deleted file mode 100644 index f9d7fed63..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go +++ /dev/null @@ -1,108 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/mount" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -// chroot on linux uses pivot_root instead of chroot -// pivot_root takes a new root and an old root. -// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. 
-// New root is where the new rootfs is set to. -// Old root is removed after the call to pivot_root so it is no longer available under the new root. -// This is similar to how libcontainer sets up a container's rootfs -func chroot(path string) (err error) { - // if the engine is running in a user namespace we need to use actual chroot - if rsystem.RunningInUserNS() { - return realChroot(path) - } - if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { - return fmt.Errorf("Error creating mount namespace before pivot: %v", err) - } - - // make everything in new ns private - if err := mount.MakeRPrivate("/"); err != nil { - return err - } - - if mounted, _ := mount.Mounted(path); !mounted { - if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { - return realChroot(path) - } - } - - // setup oldRoot for pivot_root - pivotDir, err := ioutil.TempDir(path, ".pivot_root") - if err != nil { - return fmt.Errorf("Error setting up pivot dir: %v", err) - } - - var mounted bool - defer func() { - if mounted { - // make sure pivotDir is not mounted before we try to remove it - if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { - if err == nil { - err = errCleanup - } - return - } - } - - errCleanup := os.Remove(pivotDir) - // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful - // because we already cleaned it up on failed pivot_root - if errCleanup != nil && !os.IsNotExist(errCleanup) { - errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) - if err == nil { - err = errCleanup - } - } - }() - - if err := syscall.PivotRoot(path, pivotDir); err != nil { - // If pivot fails, fall back to the normal chroot after cleaning up temp dir - if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("Error cleaning up after failed pivot: %v", err) - } - return realChroot(path) - } - mounted = true - - // This is the new path for where the old root (prior to the pivot) has been moved to - // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction - pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root: %v", err) - } - - // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host - if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { - return fmt.Errorf("Error making old root private after pivot: %v", err) - } - - // Now unmount the old root so it's no longer visible from the new root - if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { - return fmt.Errorf("Error while unmounting old root after pivot: %v", err) - } - mounted = false - - return nil -} - -func realChroot(path string) error { - if err := syscall.Chroot(path); err != nil { - return fmt.Errorf("Error after fallback to chroot: %v", err) - } - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root after chroot: %v", err) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go deleted file mode 100644 index 16354bf64..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux - -package chrootarchive - -import "syscall" - -func chroot(path string) error { - if err := 
syscall.Chroot(path); err != nil { - return err - } - return syscall.Chdir("/") -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go deleted file mode 100644 index 49acad79f..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" -) - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can only be -// uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { - return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go deleted file mode 100644 index eb0aacc3a..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ /dev/null @@ -1,130 +0,0 @@ -//+build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -type applyLayerResponse struct { - LayerSize int64 `json:"layerSize"` -} - -// applyLayer is the entry-point for docker-applylayer on re-exec. This is not -// used on Windows as it does not support chroot, hence no point sandboxing -// through chroot and rexec. -func applyLayer() { - - var ( - tmpDir = "" - err error - options *archive.TarOptions - ) - runtime.LockOSThread() - flag.Parse() - - inUserns := rsystem.RunningInUserNS() - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } - - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { - fatal(err) - } - - if inUserns { - options.InUserNS = true - } - - if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { - fatal(err) - } - - os.Setenv("TMPDIR", tmpDir) - size, err := archive.UnpackLayer("/", os.Stdin, options) - os.RemoveAll(tmpDir) - if err != nil { - fatal(err) - } - - encoder := json.NewEncoder(os.Stdout) - if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) - } - - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. 
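The parent/child handshake in applyLayerHandler below is plain JSON in both directions: the TarOptions travel to the child in the OPT environment variable, and the child answers with a single object on stdout. The response envelope, round-tripped in isolation (struct copied from the file above):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"`
}

func main() {
	// What the child would print on stdout after unpacking.
	stdout, err := json.Marshal(applyLayerResponse{LayerSize: 4096})
	if err != nil {
		log.Fatal(err)
	}

	// What the parent does with it.
	var resp applyLayerResponse
	if err := json.Unmarshal(stdout, &resp); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.LayerSize) // 4096
}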
-func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - if options == nil { - options = &archive.TarOptions{} - if rsystem.RunningInUserNS() { - options.InUserNS = true - } - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - data, err := json.Marshal(options) - if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) - } - - cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - - outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) - cmd.Stdout, cmd.Stderr = outBuf, errBuf - - if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) - } - - // Stdout should be a valid JSON struct representing an applyLayerResponse. - response := applyLayerResponse{} - decoder := json.NewDecoder(outBuf) - if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) - } - - return response.LayerSize, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index 9dd9988de..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) - } - - return s, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go deleted file mode 100644 index 4f637f17b..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/reexec" -) - -func init() { - reexec.Register("docker-applyLayer", applyLayer) - reexec.Register("docker-untar", untar) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -// flush consumes all the bytes from the reader discarding -// any errors -func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf8..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go deleted file mode 100644 index c63ae75ce..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,283 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/Sirupsen/logrus" -) - -// exclusion returns true if the specified pattern is an exclusion -func exclusion(pattern string) bool { - return pattern[0] == '!' -} - -// empty returns true if the specified pattern is empty -func empty(pattern string) bool { - return pattern == "" -} - -// CleanPatterns takes a slice of patterns returns a new -// slice of patterns cleaned with filepath.Clean, stripped -// of any empty patterns and lets the caller know whether the -// slice contains any exception patterns (prefixed with !). -func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { - // Loop over exclusion patterns and: - // 1. Clean them up. - // 2. Indicate whether we are dealing with any exception rules. - // 3. Error if we see a single exclusion marker on its own (!). - cleanedPatterns := []string{} - patternDirs := [][]string{} - exceptions := false - for _, pattern := range patterns { - // Eliminate leading and trailing whitespace. - pattern = strings.TrimSpace(pattern) - if empty(pattern) { - continue - } - if exclusion(pattern) { - if len(pattern) == 1 { - return nil, nil, false, errors.New("Illegal exclusion pattern: !") - } - exceptions = true - } - pattern = filepath.Clean(pattern) - cleanedPatterns = append(cleanedPatterns, pattern) - if exclusion(pattern) { - pattern = pattern[1:] - } - patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) - } - - return cleanedPatterns, patternDirs, exceptions, nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. 
- return false, nil - } - - patterns, patDirs, _, err := CleanPatterns(patterns) - if err != nil { - return false, err - } - - return OptimizedMatches(file, patterns, patDirs) -} - -// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. -// It will assume that the inputs have been preprocessed and therefore the function -// doesn't need to do as much error checking and clean-up. This was done to avoid -// repeating these steps on each file being checked during the archive process. -// The more generic fileutils.Matches() can't make these assumptions. -func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for i, pattern := range patterns { - negative := false - - if exclusion(pattern) { - negative = true - pattern = pattern[1:] - } - - match, err := regexpMatch(pattern, file) - if err != nil { - return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), - strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// regexpMatch tries to match the logic of filepath.Match but -// does so using regexp logic. We do this so that we can expand the -// wildcard set to include other things, like "**" to mean any number -// of directories. This means that we should be backwards compatible -// with filepath.Match(). We'll end up supporting more stuff, due to -// the fact that we're using regexp, but that's ok - it does no harm. -// -// As per the comment in golangs filepath.Match, on Windows, escaping -// is disabled. Instead, '\\' is treated as path separator. -func regexpMatch(pattern, path string) (bool, error) { - regStr := "^" - - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(pattern, path); err != nil { - return false, err - } - - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - if sl == `\` { - escSL += `\` - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" - } - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" 
is any char except "/" - regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - } else { - regStr += `\` - } - } else { - regStr += string(ch) - } - } - - regStr += "$" - - res, err := regexp.MatchString(regStr, path) - - // Map regexp's error to filepath's so no one knows we're not using filepath - if err != nil { - err = filepath.ErrBadPattern - } - - return res, err -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. 
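The regexpMatch helper above exists because filepath.Match has no "**" support: a plain "*" never crosses a path separator. A minimal stdlib-only sketch of that baseline behaviour (the paths are arbitrary examples):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Stdlib filepath.Match: "*" never crosses a path separator,
	// which is exactly the gap regexpMatch's "**" handling fills.
	ok, _ := filepath.Match("docs/*.md", "docs/readme.md")
	fmt.Println(ok) // true
	ok, _ = filepath.Match("docs/*.md", "docs/sub/readme.md")
	fmt.Println(ok) // false: no "**" in filepath.Match
}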
-func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go deleted file mode 100644 index ccd648fac..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go +++ /dev/null @@ -1,27 +0,0 @@ -package fileutils - -import ( - "os" - "os/exec" - "strconv" - "strings" -) - -// GetTotalUsedFds returns the number of used File Descriptors by -// executing `lsof -p PID` -func GetTotalUsedFds() int { - pid := os.Getpid() - - cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) - - output, err := cmd.CombinedOutput() - if err != nil { - return -1 - } - - outputStr := strings.TrimSpace(string(output)) - - fds := strings.Split(outputStr, "\n") - - return len(fds) - 1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab9..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. -// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index d5c3abf56..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package fileutils - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cace..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. 
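The Unix GetTotalUsedFds above counts the entries under /proc/<pid>/fd; the same probe as a self-contained program (Linux-only, illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	// Each entry in /proc/<pid>/fd is one open file descriptor
	// of this process; counting them is the whole trick.
	fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid()))
	if err != nil {
		fmt.Println(-1)
		return
	}
	fmt.Println(len(fds))
}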
-func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go deleted file mode 100644 index ded091f2a..000000000 --- a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go +++ /dev/null @@ -1,100 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/urlutil" -) - -// Clone clones a repository into a newly created directory which -// will be under "docker-build-git" -func Clone(remoteURL string) (string, error) { - if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return "", err - } - - u, err := url.Parse(remoteURL) - if err != nil { - return "", err - } - - fragment := u.Fragment - clone := cloneArgs(u, root) - - if output, err := git(clone...); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - - return checkoutGit(fragment, root) -} - -func cloneArgs(remoteURL *url.URL, root string) []string { - args := []string{"clone", "--recursive"} - shallow := len(remoteURL.Fragment) == 0 - - if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { - res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) - if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { - shallow = false - } - } - - if shallow { - args = append(args, "--depth", "1") - } - - if remoteURL.Fragment != "" { - remoteURL.Fragment = "" - } - - return append(args, remoteURL.String(), root) -} - -func checkoutGit(fragment, root string) (string, error) { - refAndDir := strings.SplitN(fragment, ":", 2) - - if len(refAndDir[0]) != 0 { - if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - - if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { - newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) - if err != nil { - return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) - } - - fi, err := os.Stat(newCtx) - if err != nil { - return "", err - } - if !fi.IsDir() { - return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) - } - root = newCtx - } - - return root, nil -} - -func gitWithinDir(dir string, args ...string) ([]byte, error) { - a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} - return git(append(a, args...)...) -} - -func git(args ...string) ([]byte, error) { - return exec.Command("git", args...).CombinedOutput() -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go deleted file mode 100644 index 8154e83f0..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. 
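cloneArgs above only opts into --depth 1 when the remote speaks the Git smart HTTP protocol. A standalone sketch of that probe, assuming nothing beyond net/http (the URL is just an example):

package main

import (
	"fmt"
	"net/http"
)

// smartHTTP mirrors the check cloneArgs performs before adding
// "--depth 1" (sketch only; no timeouts or redirect policy).
func smartHTTP(remote string) bool {
	res, err := http.Head(remote + "/info/refs?service=git-upload-pack")
	if err != nil {
		return false
	}
	defer res.Body.Close()
	return res.Header.Get("Content-Type") == "application/x-git-upload-pack-advertisement"
}

func main() {
	fmt.Println(smartHTTP("https://github.com/containous/traefik"))
}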
-// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go deleted file mode 100644 index d7dc43877..000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/httputils.go +++ /dev/null @@ -1,56 +0,0 @@ -package httputils - -import ( - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/docker/docker/pkg/jsonmessage" -) - -var ( - headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) - errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") -) - -// Download requests a given URL and returns an io.Reader. -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -// NewHTTPRequestError returns a JSON response error. -func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// ServerHeader contains the server information. -type ServerHeader struct { - App string // docker - Ver string // 1.8.0-dev - OS string // windows or linux -} - -// ParseServerHeader extracts pieces from an HTTP server header -// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). -func ParseServerHeader(hdr string) (*ServerHeader, error) { - matches := headerRegexp.FindStringSubmatch(hdr) - if len(matches) != 4 { - return nil, errInvalidHeader - } - return &ServerHeader{ - App: strings.TrimSpace(matches[1]), - Ver: strings.TrimSpace(matches[2]), - OS: strings.TrimSpace(matches[3]), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go deleted file mode 100644 index d5cf34e4f..000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go +++ /dev/null @@ -1,30 +0,0 @@ -package httputils - -import ( - "mime" - "net/http" -) - -// MimeTypes stores the MIME content type. -var MimeTypes = struct { - TextPlain string - Tar string - OctetStream string -}{"text/plain", "application/tar", "application/octet-stream"} - -// DetectContentType returns a best guess representation of the MIME -// content type for the bytes at c. The value detected by -// http.DetectContentType is guaranteed not be nil, defaulting to -// application/octet-stream when a better guess cannot be made. The -// result of this detection is then run through mime.ParseMediaType() -// which separates the actual MIME string from any parameters. 
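The homedir package above is an environment-first lookup with a passwd-file fallback. The env half in isolation (a sketch; the fallback would need libcontainer/user):

package main

import (
	"fmt"
	"os"
	"runtime"
)

// homeKey mirrors Key above: pick the env var by platform.
func homeKey() string {
	if runtime.GOOS == "windows" {
		return "USERPROFILE"
	}
	return "HOME"
}

func main() {
	// The env-first lookup that Get performs.
	fmt.Println(os.Getenv(homeKey()))
}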
-func DetectContentType(c []byte) (string, map[string]string, error) { - - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) - if err != nil { - return "", nil, err - } - - return contentType, args, nil -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go deleted file mode 100644 index bebc8608c..000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go +++ /dev/null @@ -1,95 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "net/http" - "time" - - "github.com/Sirupsen/logrus" -) - -type resumableRequestReader struct { - client *http.Client - request *http.Request - lastRange int64 - totalSize int64 - currentResponse *http.Response - failures uint32 - maxFailures uint32 -} - -// ResumableRequestReader makes it possible to resume reading a request's body transparently -// maxfail is the number of times we retry to make requests again (not resumes) -// totalsize is the total length of the body; auto detect if not provided -func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} -} - -// ResumableRequestReaderWithInitialResponse makes it possible to resume -// reading the body of an already initiated request. -func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} -} - -func (r *resumableRequestReader) Read(p []byte) (n int, err error) { - if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil\n") - } - isFreshRequest := false - if r.lastRange != 0 && r.currentResponse == nil { - readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) - r.request.Header.Set("Range", readRange) - time.Sleep(5 * time.Second) - } - if r.currentResponse == nil { - r.currentResponse, err = r.client.Do(r.request) - isFreshRequest = true - } - if err != nil && r.failures+1 != r.maxFailures { - r.cleanUpResponse() - r.failures++ - time.Sleep(5 * time.Duration(r.failures) * time.Second) - return 0, nil - } else if err != nil { - r.cleanUpResponse() - return 0, err - } - if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { - r.cleanUpResponse() - return 0, io.EOF - } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { - r.cleanUpResponse() - return 0, fmt.Errorf("the server doesn't support byte ranges") - } - if r.totalSize == 0 { - r.totalSize = r.currentResponse.ContentLength - } else if r.totalSize <= 0 { - r.cleanUpResponse() - return 0, fmt.Errorf("failed to auto detect content length") - } - n, err = r.currentResponse.Body.Read(p) - r.lastRange += int64(n) - if err != nil { - r.cleanUpResponse() - } - if err != nil && err != io.EOF { - logrus.Infof("encountered error during pull and clearing it before resume: %s", err) - err = nil - } - return n, err -} - -func (r *resumableRequestReader) Close() error { - r.cleanUpResponse() - r.client = nil - r.request = nil - return nil -} - -func (r *resumableRequestReader) cleanUpResponse() { - if r.currentResponse != nil { - r.currentResponse.Body.Close() - 
r.currentResponse = nil - } -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index 6bca46628..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,197 +0,0 @@ -package idtools - -import ( - "bufio" - "fmt" - "os" - "sort" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID - } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID - } - return uid, gid, nil -} - -// ToContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// ToHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. 
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// CreateIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { - subuidRanges, err := parseSubuid(username) - if err != nil { - return nil, nil, err - } - subgidRanges, err := parseSubgid(groupname) - if err != nil { - return nil, nil, err - } - if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) - } - if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) -// and return all found ranges for a specified username. 
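ToContainer/ToHost above are plain interval arithmetic over IDMap entries. A standalone sketch of the host-direction translation (types and names shortened here):

package main

import "fmt"

// idMap mirrors one IDMap entry: container IDs
// [containerID, containerID+size) map onto host IDs
// [hostID, hostID+size).
type idMap struct{ containerID, hostID, size int }

func toHost(contID int, m []idMap) (int, bool) {
	for _, e := range m {
		if contID >= e.containerID && contID < e.containerID+e.size {
			return e.hostID + (contID - e.containerID), true
		}
	}
	return -1, false
}

func main() {
	m := []idMap{{containerID: 0, hostID: 100000, size: 65536}}
	fmt.Println(toHost(0, m)) // 100000 true: remapped root
}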
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index f9eb31c3e..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,207 +0,0 @@ -// +build !windows - -package idtools - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. 
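For reference, the files parseSubidFile reads hold one name:start:count triple per line, e.g. (the user name is illustrative):

dockremap:100000:65536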
If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - if err := os.Chown(path, ownerUID, ownerGID); err != nil { - return err - } - // short-circuit--we were called with an existing directory and chown was requested - return nil - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, uid, gid int) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(uid), - statInfo.GID() == uint32(gid), permBits) -} - -func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { - return true - } - if isGroup && (perms&0010 == 0010) { - return true - } - if perms&0001 == 0001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) -} - -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) - } - 
return users[0], nil -} - -// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(groupname string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(groupname) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) -} - -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) - if err != nil { - return user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) - } - return groups[0], nil -} - -func callGetent(args string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("") - } - out, err := execCmd(getentCmd, args) - if err != nil { - exitCode, errC := system.GetExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - - } - return bytes.NewReader(out), nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index 49f67e78c..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
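callGetent above shells out so that NSS-configured databases (LDAP and the like) are consulted rather than /etc/passwd alone. The same query from a standalone program (sketch):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Same query callGetent issues: "getent passwd <name>" answers
	// from whatever NSS sources the host is configured with.
	out, err := exec.Command("getent", "passwd", "root").Output()
	if err != nil {
		fmt.Println("getent unavailable:", err)
		return
	}
	fmt.Print(string(out)) // e.g. root:x:0:0:root:/root:/bin/bash
}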
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, uid, gid int) bool { - return true -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index 9da7975e2..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,164 +0,0 @@ -package idtools - -import ( - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "sync" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } - - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 - userMod = "usermod" -) - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. 
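AddNamespaceRangesUser below recovers the new uid/gid pair by running `id` and applying idOutRegexp. The extraction step in isolation, with the command output hardcoded:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same pattern as idOutRegexp above, applied to canned
	// `id` output instead of a live command.
	re := regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
	m := re.FindStringSubmatch("uid=999(dockremap) gid=999(dockremap) groups=999(dockremap)")
	if len(m) == 3 {
		fmt.Println("uid:", m[1], "gid:", m[2]) // uid: 999 gid: 999
	}
}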
-func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) - } - return uid, gid, nil -} - -func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) - if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := parseSubuid(name) - if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) - } - } - - ranges, err = parseSubgid(name) - if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("Can't find available subgid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) - } - 
sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index d98b354cb..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go deleted file mode 100644 index 9703ecbd9..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !windows - -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
- return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 3d737b3e1..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index 72a04f349..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,186 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. 
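NewBytesPipe above draws its backing slices from per-capacity pools (getBuffer/returnBuffer further down). The pooling pattern in miniature, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type buf struct{ b []byte }

// One sync.Pool per capacity, echoing bufPools above: buffers of a
// given size class are recycled instead of reallocated under load.
var (
	mu    sync.Mutex
	pools = map[int]*sync.Pool{}
)

func get(size int) *buf {
	mu.Lock()
	p, ok := pools[size]
	if !ok {
		p = &sync.Pool{New: func() interface{} { return &buf{b: make([]byte, 0, size)} }}
		pools[size] = p
	}
	mu.Unlock()
	return p.Get().(*buf)
}

func put(b *buf) {
	b.b = b.b[:0] // reset length, keep capacity
	mu.Lock()
	p, ok := pools[cap(b.b)]
	mu.Unlock()
	if ok {
		p.Put(b)
	}
}

func main() {
	b := get(64)
	b.b = append(b.b, "hello"...)
	fmt.Println(string(b.b), cap(b.b)) // hello 64
	put(b)
}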
-func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - bp.mu.Unlock() - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba3..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git 
a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index a56c46265..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,162 +0,0 @@ -package ioutils - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. -type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. 
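AtomicWriteFile above is the classic write-temp-then-rename recipe. A compact standalone version under the same assumptions (temp file in the target's directory, fsync before rename):

package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// atomicWrite: write to a temp file beside the target, sync, chmod,
// then rename into place, so readers never observe a half-written
// file (the same recipe AtomicWriteFile wraps).
func atomicWrite(name string, data []byte, perm os.FileMode) error {
	f, err := ioutil.TempFile(filepath.Dir(name), ".tmp-"+filepath.Base(name))
	if err != nil {
		return err
	}
	defer os.Remove(f.Name()) // no-op once the rename succeeds
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(f.Name(), perm); err != nil {
		return err
	}
	return os.Rename(f.Name(), name)
}

func main() {
	if err := atomicWrite("example.json", []byte("{}\n"), 0644); err != nil {
		log.Fatal(err)
	}
}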
-func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go deleted file mode 100644 index d7b97486c..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go +++ /dev/null @@ -1,223 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx++ - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - - var offsetTo int64 - - for _, rdr := range r.readers { - size, err := getReadSeekerSize(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo+size > offset { - return rdr, offset - offsetTo, nil - } - if rdr == r.readers[len(r.readers)-1] { - return rdr, offsetTo + offset, nil - } - offsetTo += size - } - - return nil, 0, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if 
r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - r.pos = &pos{0, 0} - } - - bLen := int64(len(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bLen) - if err != nil && err != io.EOF { - return -1, err - } - bLen -= readBytes - - if bLen == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. -func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index 63f3c07f4..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,154 +0,0 @@ -package ioutils - -import ( - "crypto/sha256" - "encoding/hex" - "io" - - "golang.org/x/net/context" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. 
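MultiReadSeeker above concatenates ReadSeekers and adds Seek across the whole sequence. The read-only half already exists in the stdlib, which is the baseline being extended:

package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	// io.MultiReader concatenates readers in order; the deleted
	// MultiReadSeeker adds Seek across the concatenation.
	r := io.MultiReader(strings.NewReader("hello, "), strings.NewReader("world\n"))
	io.Copy(os.Stdout, r) // prints "hello, world"
}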
-func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go deleted file mode 100644 index 1539ad21b..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. -func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index c258e5fdd..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package ioutils - -import ( - "io/ioutil" - - "github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. 
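HashData above is a thin wrapper over streaming SHA-256. Spelled out with stdlib pieces only:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stream any reader through sha256 and prefix the hex digest,
	// exactly what HashData returns.
	h := sha256.New()
	if _, err := io.Copy(h, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	fmt.Println("sha256:" + hex.EncodeToString(h.Sum(nil)))
}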
-func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 52a4901ad..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index ccc7f9c23..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. 
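A minimal sketch of the WriteFlusher deleted above in its typical role, streaming output over HTTP (handler path and port are invented): http.ResponseWriter implements Flush, so NewWriteFlusher pushes every write to the client instead of buffering.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		// Every Write is followed by a Flush, so the client sees
		// each line as it is produced.
		wf := ioutils.NewWriteFlusher(w)
		defer wf.Close()

		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "tick %d\n", i)
			time.Sleep(time.Second)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}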
-func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 4734c3111..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Format returns the log formatted according to format -// If format is nil, returns the log message -// If format is json, returns the log marshaled in json format -// By default, returns the log with the log time formatted according to format. -func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. -func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 83ce684a8..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,178 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. 
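A usage sketch of the WriteCounter deleted above, in the exact situation its doc comment names: json.Encoder.Encode reports only an error, never a byte count, so wrapping the destination recovers it.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Wrap the destination so the masked write size becomes observable.
	wc := ioutils.NewWriteCounter(ioutil.Discard)
	if err := json.NewEncoder(wc).Encode(map[string]string{"log": "hello"}); err != nil {
		panic(err)
	}
	fmt.Println(wc.Count) // bytes written, including the trailing newline
}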
-// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. -func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') -} diff --git 
a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go deleted file mode 100644 index df522c0d6..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,122 +0,0 @@ -package jsonlog - -import ( - "bytes" - "encoding/json" - "unicode/utf8" -) - -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. -func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if len(mj.RawAttrs) > 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - buf.WriteString(mj.Created) - buf.WriteString(`}`) - return nil -} - -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. -func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 211733814..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. -package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. 
- RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. - return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index 5481433c5..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,225 +0,0 @@ -package jsonmessage - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/term" - "github.com/docker/go-units" -) - -// JSONError wraps a concrete Code and Message, `Code` is -// is an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. -type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` -} - -func (p *JSONProgress) String() string { - var ( - width = 200 - pbBox string - numbersBox string - timeLeftBox string - ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - - if p.Current <= 0 && p.Total <= 0 { - return "" - } - current := units.HumanSize(float64(p.Current)) - if p.Total <= 0 { - return fmt.Sprintf("%8v", current) - } - total := units.HumanSize(float64(p.Total)) - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. 
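A sketch contrasting the two time formats defined above (the sample timestamp is invented): RFC3339Nano trims trailing zeros, so column widths jitter from line to line, while RFC3339NanoFixed pads nanoseconds to nine digits so log timestamps align.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	t := time.Date(2017, 3, 23, 22, 43, 6, 5000, time.UTC)

	fmt.Println(t.Format(time.RFC3339Nano))         // 2017-03-23T22:43:06.000005Z
	fmt.Println(t.Format(jsonlog.RFC3339NanoFixed)) // 2017-03-23T22:43:06.000005000Z

	// FastTimeMarshalJSON yields the quoted RFC3339Nano form while
	// avoiding an allocation that time.MarshalJSON makes.
	s, err := jsonlog.FastTimeMarshalJSON(t)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "2017-03-23T22:43:06.000005Z"
}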
-type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - // [2K = erase entire current line - fmt.Fprintf(out, "%c[2K\r", 27) - endl = "\r" - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - ) - for { - diff := 0 - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm.Aux) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = len(ids) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } - diff = len(ids) - line - if isTerminal && diff > 0 { - fmt.Fprintf(out, "%c[%dA", 27, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). 
- ids = make(map[string]int) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal && diff > 0 { - fmt.Fprintf(out, "%c[%dB", 27, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff4..000000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go deleted file mode 100644 index 607dbed43..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ /dev/null @@ -1,149 +0,0 @@ -package mount - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false, RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merge mount options to make sure there is no duplicate. 
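A sketch of the two cases longpath.AddPrefix (deleted above) handles; the example paths are invented.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	// A drive-letter path gets the prefix verbatim...
	fmt.Println(longpath.AddPrefix(`C:\some\deep\path`)) // \\?\C:\some\deep\path

	// ...while a UNC path needs the extra UNC marker.
	fmt.Println(longpath.AddPrefix(`\\server\share\file`)) // \\?\UNC\server\share\file
}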
-func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collisions maps to remove duplicates. - // For flag, the key is the flag value (the key for propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is collision for flag - if !flagCollisions[key] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - flagCollisions[key] = true - } - continue - } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("Invalid tmpfs option %q", opt) - } - if !dataCollisions[opt[0]] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - dataCollisions[opt[0]] = true - } - } - - return newOptions, nil -} - -// Parse fstab type mount options into mount() flags -// and device specific data -func parseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go deleted file mode 100644 index f166cb2f7..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build freebsd,cgo - -package mount - -/* -#include -*/ -import "C" - -const ( - // RDONLY will mount the filesystem as read-only. - RDONLY = C.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = C.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = C.MNT_NOEXEC - - // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. - SYNCHRONOUS = C.MNT_SYNCHRONOUS - - // NOATIME will not update the file access time when reading from a file. - NOATIME = C.MNT_NOATIME -) - -// These flags are unsupported. 
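A sketch of the deduplication MergeTmpfsOptions (deleted above) performs, assuming Linux so the flag constants are non-zero. The list is processed in reverse, so later options win: "ro" overrides the earlier "rw", and only the last size= value survives.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	opts, err := mount.MergeTmpfsOptions([]string{"rw", "size=64m", "size=128m", "ro"})
	if err != nil {
		panic(err)
	}
	fmt.Println(opts) // [size=128m ro]
}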
-const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NODEV = 0 - NODIRATIME = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIVE = 0 - RELATIME = 0 - REMOUNT = 0 - STRICTATIME = 0 -) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go deleted file mode 100644 index dc696dce9..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ /dev/null @@ -1,85 +0,0 @@ -package mount - -import ( - "syscall" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = syscall.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = syscall.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = syscall.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = syscall.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = syscall.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = syscall.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = syscall.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = syscall.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = syscall.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = syscall.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = syscall.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = syscall.MS_BIND | syscall.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = syscall.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = syscall.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = syscall.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = syscall.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = syscall.MS_SHARED | syscall.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = syscall.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. 
- STRICTATIME = syscall.MS_STRICTATIME -) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go deleted file mode 100644 index 5564f7b3c..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !linux,!freebsd freebsd,!cgo solaris,!cgo - -package mount - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NOATIME = 0 - NODEV = 0 - NODIRATIME = 0 - NOEXEC = 0 - NOSUID = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - RELATIVE = 0 - REMOUNT = 0 - STRICTATIME = 0 - SYNCHRONOUS = 0 - RDONLY = 0 -) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go deleted file mode 100644 index 66ac4bf47..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ /dev/null @@ -1,74 +0,0 @@ -package mount - -import ( - "time" -) - -// GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*Info, error) { - return parseMountTable() -} - -// Mounted determines if a specified mountpoint has been mounted. -// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. -func Mounted(mountpoint string) (bool, error) { - entries, err := parseMountTable() - if err != nil { - return false, err - } - - // Search the table for the mountpoint - for _, e := range entries { - if e.Mountpoint == mountpoint { - return true, nil - } - } - return false, nil -} - -// Mount will mount filesystem according to the specified configuration, on the -// condition that the target path is *not* already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) - if flag&REMOUNT != REMOUNT { - if mounted, err := Mounted(target); err != nil || mounted { - return err - } - } - return ForceMount(device, target, mType, options) -} - -// ForceMount will mount a filesystem according to the specified configuration, -// *regardless* if the target path is not already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func ForceMount(device, target, mType, options string) error { - flag, data := parseOptions(options) - if err := mount(device, target, mType, uintptr(flag), data); err != nil { - return err - } - return nil -} - -// Unmount will unmount the target filesystem, so long as it is mounted. -func Unmount(target string) error { - if mounted, err := Mounted(target); err != nil || !mounted { - return err - } - return ForceUnmount(target) -} - -// ForceUnmount will force an unmount of the target filesystem, regardless if -// it is mounted or not. 
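A usage sketch of the Mount/Mounted/Unmount helpers deleted above; the mountpoint is hypothetical, and the program needs root on Linux.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	target := "/mnt/scratch" // hypothetical mountpoint

	// Mount is a no-op when target is already mounted (unless remounting).
	if err := mount.Mount("tmpfs", target, "tmpfs", "size=16m,noexec"); err != nil {
		panic(err)
	}

	mounted, err := mount.Mounted(target)
	if err != nil {
		panic(err)
	}
	fmt.Println("mounted:", mounted) // true

	if err := mount.Unmount(target); err != nil {
		panic(err)
	}
}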
-func ForceUnmount(target string) (err error) { - // Simple retry logic for unmount - for i := 0; i < 10; i++ { - if err = unmount(target, 0); err == nil { - return nil - } - time.Sleep(100 * time.Millisecond) - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go deleted file mode 100644 index bb870e6f5..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ /dev/null @@ -1,59 +0,0 @@ -package mount - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "strings" - "syscall" - "unsafe" -) - -func allocateIOVecs(options []string) []C.struct_iovec { - out := make([]C.struct_iovec, len(options)) - for i, option := range options { - out[i].iov_base = unsafe.Pointer(C.CString(option)) - out[i].iov_len = C.size_t(len(option) + 1) - } - return out -} - -func mount(device, target, mType string, flag uintptr, data string) error { - isNullFS := false - - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true - } - } - - options := []string{"fspath", target} - if isNullFS { - options = append(options, "fstype", "nullfs", "target", device) - } else { - options = append(options, "fstype", mType, "from", device) - } - rawOptions := allocateIOVecs(options) - for _, rawOption := range rawOptions { - defer C.free(rawOption.iov_base) - } - - if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) - } - return nil -} - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go deleted file mode 100644 index dd4280c77..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -package mount - -import ( - "syscall" -) - -func mount(device, target, mType string, flag uintptr, data string) error { - if err := syscall.Mount(device, target, mType, flag, data); err != nil { - return err - } - - // If we have a bind mount or remount, remount... 
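	// On Linux, mount(2) ignores most flags, including MS_RDONLY, on the
	// initial MS_BIND call, so a read-only bind mount only takes effect
	// after a second call with MS_REMOUNT|MS_BIND, which is exactly what
	// the branch below performs.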
- if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { - return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) - } - return nil -} - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go deleted file mode 100644 index c684aa81f..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build solaris,cgo - -package mount - -import ( - "golang.org/x/sys/unix" - "unsafe" -) - -// #include -// #include -// #include -// int Mount(const char *spec, const char *dir, int mflag, -// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { -// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); -// } -import "C" - -func mount(device, target, mType string, flag uintptr, data string) error { - spec := C.CString(device) - dir := C.CString(target) - fstype := C.CString(mType) - _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) - C.free(unsafe.Pointer(spec)) - C.free(unsafe.Pointer(dir)) - C.free(unsafe.Pointer(fstype)) - return err -} - -func unmount(target string, flag int) error { - err := unix.Unmount(target, flag) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go deleted file mode 100644 index a2a3bb457..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -func mount(device, target, mType string, flag uintptr, data string) error { - panic("Not implemented") -} - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go deleted file mode 100644 index e3fc3535e..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go +++ /dev/null @@ -1,40 +0,0 @@ -package mount - -// Info reveals information about a particular mounted filesystem. This -// struct is populated from the content in the /proc//mountinfo file. -type Info struct { - // ID is a unique identifier of the mount (may be reused after umount). - ID int - - // Parent indicates the ID of the mount parent (or of self for the top of the - // mount tree). - Parent int - - // Major indicates one half of the device ID which identifies the device class. - Major int - - // Minor indicates one half of the device ID which identifies a specific - // instance of device. - Minor int - - // Root of the mount within the filesystem. - Root string - - // Mountpoint indicates the mount point relative to the process's root. - Mountpoint string - - // Opts represents mount-specific options. - Opts string - - // Optional represents optional fields. - Optional string - - // Fstype indicates the type of filesystem, such as EXT3. - Fstype string - - // Source indicates filesystem specific information or "none". - Source string - - // VfsOpts represents per super block options. 
- VfsOpts string -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go deleted file mode 100644 index 4f32edcd9..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ /dev/null @@ -1,41 +0,0 @@ -package mount - -/* -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts. -func parseMountTable() ([]*Info, error) { - var rawEntries *C.struct_statfs - - count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) - if count == 0 { - return nil, fmt.Errorf("Failed to call getmntinfo") - } - - var entries []C.struct_statfs - header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) - header.Cap = count - header.Len = count - header.Data = uintptr(unsafe.Pointer(rawEntries)) - - var out []*Info - for _, entry := range entries { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) - mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) - out = append(out, &mountinfo) - } - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go deleted file mode 100644 index be69fee1d..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build linux - -package mount - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s %s" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts -func parseMountTable() ([]*Info, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} - -func parseInfoFile(r io.Reader) ([]*Info, error) { - var ( - s = bufio.NewScanner(r) - out = []*Info{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - var ( - p = &Info{} - text = s.Text() - optionalFields string - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.ID, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { - return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) - } - // Safe as mountinfo encodes mountpoints with spaces as \040. 
- index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) < 3 { - return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - - if optionalFields != "-" { - p.Optional = optionalFields - } - - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") - out = append(out, p) - } - return out, nil -} - -// PidMountInfo collects the mounts for a specific process ID. If the process -// ID is unknown, it is better to use `GetMounts` which will inspect -// "/proc/self/mountinfo" instead. -func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go deleted file mode 100644 index ad9ab57f8..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package mount - -/* -#include -#include -*/ -import "C" - -import ( - "fmt" -) - -func parseMountTable() ([]*Info, error) { - mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) - if mnttab == nil { - return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) - } - - var out []*Info - var mp C.struct_mnttab - - ret := C.getmntent(mnttab, &mp) - for ret == 0 { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) - mountinfo.Source = C.GoString(mp.mnt_special) - mountinfo.Fstype = C.GoString(mp.mnt_fstype) - mountinfo.Opts = C.GoString(mp.mnt_mntopts) - out = append(out, &mountinfo) - ret = C.getmntent(mnttab, &mp) - } - - C.fclose(mnttab) - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go deleted file mode 100644 index 7fbcf1921..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -import ( - "fmt" - "runtime" -) - -func parseMountTable() ([]*Info, error) { - return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go deleted file mode 100644 index dab8a37ed..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package mount - -func parseMountTable() ([]*Info, error) { - // Do NOT return an error! - return nil, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go deleted file mode 100644 index 8ceec84bc..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build linux - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. 
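A standalone sketch of the two-phase parse parseInfoFile (above) performs: scan the fixed prefix with the documented format string, then split everything after the " - " separator. The sample line is the one from the format comment.

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := `36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue`

	var (
		id, parent, major, minor int
		root, mp, opts, optional string
	)
	if _, err := fmt.Sscanf(line, "%d %d %d:%d %s %s %s %s",
		&id, &parent, &major, &minor, &root, &mp, &opts, &optional); err != nil {
		panic(err)
	}

	// Fields after the " - " separator: fstype, source, super options.
	post := strings.Fields(line[strings.Index(line, " - ")+3:])
	fmt.Println(mp, opts, post[0], post[1]) // /mnt2 rw,noatime ext3 /dev/root
}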
-func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) - if err != nil { - return err - } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - if _, err = Mounted(mountPoint); err != nil { - return err - } - - return ForceMount("", mountPoint, "none", options) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go deleted file mode 100644 index 09f6b03cb..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build solaris - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. 
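A usage sketch for the propagation helpers above; the state directory is a hypothetical path, and the call requires root on Linux.

package main

import "github.com/docker/docker/pkg/mount"

func main() {
	// Keep mount events under a daemon's state directory from
	// propagating to or from the rest of the mount namespace.
	if err := mount.MakeRPrivate("/var/lib/mydaemon"); err != nil {
		panic(err)
	}
}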
-func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - // TODO: Solaris does not support bind mounts. - // Evaluate lofs and also look at the relevant - // mount flags to be supported. - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go deleted file mode 100644 index cfb8157d6..000000000 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ /dev/null @@ -1,590 +0,0 @@ -package namesgenerator - -import ( - "fmt" - - "github.com/docker/docker/pkg/random" -) - -var ( - left = [...]string{ - "admiring", - "adoring", - "affectionate", - "agitated", - "amazing", - "angry", - "awesome", - "blissful", - "boring", - "brave", - "clever", - "cocky", - "compassionate", - "competent", - "condescending", - "confident", - "cranky", - "dazzling", - "determined", - "distracted", - "dreamy", - "eager", - "ecstatic", - "elastic", - "elated", - "elegant", - "eloquent", - "epic", - "fervent", - "festive", - "flamboyant", - "focused", - "friendly", - "frosty", - "gallant", - "gifted", - "goofy", - "gracious", - "happy", - "hardcore", - "heuristic", - "hopeful", - "hungry", - "infallible", - "inspiring", - "jolly", - "jovial", - "keen", - "kickass", - "kind", - "laughing", - "loving", - "lucid", - "mystifying", - "modest", - "musing", - "naughty", - "nervous", - "nifty", - "nostalgic", - "objective", - "optimistic", - "peaceful", - "pedantic", - "pensive", - "practical", - "priceless", - "quirky", - "quizzical", - "relaxed", - "reverent", - "romantic", - "sad", - "serene", - "sharp", - "silly", - "sleepy", - "stoic", - "stupefied", - "suspicious", - "tender", - "thirsty", - "trusting", - "unruffled", - "upbeat", - "vibrant", - "vigilant", - "wizardly", - "wonderful", - "xenodochial", - "youthful", - "zealous", - "zen", - } - - // Docker, starting from 0.7.x, generates names from notable scientists and hackers. - // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. - right = [...]string{ - // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB - "albattani", - - // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. 
https://en.wikipedia.org/wiki/Frances_E._Allen - "allen", - - // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida - "almeida", - - // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi - "agnesi", - - // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes - "archimedes", - - // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli - "ardinghelli", - - // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata - "aryabhata", - - // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin - "austin", - - // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. - "babbage", - - // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach - "banach", - - // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen - "bardeen", - - // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik - "bartik", - - // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi - "bassi", - - // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver - "beaver", - - // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell - "bell", - - // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha - "bhabha", - - // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus - "bhaskara", - - // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell - "blackwell", - - // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. - "bohr", - - // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth - "booth", - - // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg - "borg", - - // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose - "bose", - - // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. 
https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville - "boyd", - - // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero - "brahmagupta", - - // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain - "brattain", - - // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) - "brown", - - // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson - "carson", - - // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar - "chandrasekhar", - - //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) - "shannon", - - // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke - "clarke", - - // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden - "colden", - - // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori - "cori", - - // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray - "cray", - - // This entry reflects a husband and wife team who worked together: - // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran - // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran - "curran", - - // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. - "curie", - - // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. - "darwin", - - // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. - "davinci", - - // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. - "dijkstra", - - // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky - "dubinsky", - - // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley - "easley", - - // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison - "edison", - - // Albert Einstein invented the general theory of relativity. 
https://en.wikipedia.org/wiki/Albert_Einstein - "einstein", - - // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion - "elion", - - // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart - "engelbart", - - // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid - "euclid", - - // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler - "euler", - - // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat - "fermat", - - // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. - "fermi", - - // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman - "feynman", - - // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. - "franklin", - - // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei - "galileo", - - // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates - "gates", - - // Adele Goldberg was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) - "goldberg", - - // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine - "goldstine", - - // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser - "goldwasser", - - // James Golick, all around gangster. - "golick", - - // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall - "goodall", - - // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt - "haibt", - - // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) - "hamilton", - - // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking - "hawking", - - // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg - "heisenberg", - - // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD - "heyrovsky", - - // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964.
https://en.wikipedia.org/wiki/Dorothy_Hodgkin - "hodgkin", - - // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover - "hoover", - - // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper - "hopper", - - // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle - "hugle", - - // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia - "hypatia", - - // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil - "jang", - - // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik - "jennings", - - // Mary Lou Jepsen was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen - "jepsen", - - // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie - "joliot", - - // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones - "jones", - - // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam - "kalam", - - // Susan Kare created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare - "kare", - - // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller - "keller", - - // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize in Physiology or Medicine - https://en.wikipedia.org/wiki/Har_Gobind_Khorana - "khorana", - - // Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby - "kilby", - - // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch - "kirch", - - // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system.
https://en.wikipedia.org/wiki/Donald_Knuth - "knuth", - - // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya - "kowalevski", - - // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande - "lalande", - - // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr - "lamarr", - - // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport - "lamport", - - // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey - "leakey", - - // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt - "leavitt", - - // Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim. Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9/11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin - "lewin", - - // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum - "lichterman", - - // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Award in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov - "liskov", - - // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) - "lovelace", - - // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re - "lumiere", - - // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) - "mahavira", - - // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer - "mayer", - - // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) - "mccarthy", - - // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock - "mcclintock", - - // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean - "mclean", - - // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli - "mcnulty", - - // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner - "meitner", - - // Carla Meninsky was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords.
https://en.wikipedia.org/wiki/Carla_Meninsky - "meninsky", - - // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf - "mestorf", - - // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky - "minsky", - - // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani - "mirzakhani", - - // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse - "morse", - - // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock - "murdock", - - // Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton - "newton", - - // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform - "nightingale", - - // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel - "nobel", - - // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether - "noether", - - // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 - "northcutt", - - // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce - "noyce", - - // Panini - Ancient Indian linguist and grammarian from the 4th century BCE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems - "panini", - - // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 - "pare", - - // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. - "pasteur", - - // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin - "payne", - - // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman - "perlman", - - // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike - "pike", - - // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 - "poincare", - - // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden.
https://en.wikipedia.org/wiki/Laura_Poitras - "poitras", - - // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy - "ptolemy", - - // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman - "raman", - - // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan - "ramanujan", - - // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride - "ride", - - // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) - "montalcini", - - // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie - "ritchie", - - // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen - "roentgen", - - // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin - "rosalind", - - // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha - "saha", - - // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet - "sammet", - - // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) - "shaw", - - // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley - "shirley", - - // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley - "shockley", - - // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi - "sinoussi", - - // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton - "snyder", - - // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence - "spence", - - // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. 
https://en.wikiquote.org/wiki/Richard_Stallman - "stallman", - - // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker - "stonebraker", - - // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson - "swanson", - - // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz - "swartz", - - // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles - "swirles", - - // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla - "tesla", - - // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson - "thompson", - - // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds - "torvalds", - - // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. - "turing", - - // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions - "varahamihira", - - // Sir Mokshagundam Visvesvaraya - was a notable Indian engineer. He received the Indian Republic's highest honour, the Bharat Ratna, in 1955. His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya - "visvesvaraya", - - // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard - "volhard", - - // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer - "wescoff", - - // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles - "wiles", - - // Roberta Williams did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams - "williams", - - // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson - "wilson", - - // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing - "wing", - - // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak - "wozniak", - - // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers - "wright", - - // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique.
https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow - "yalow", - - // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath - "yonath", - } -) - -// GetRandomName generates a random name from the list of adjectives and surnames in this package -// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random -// integer between 0 and 9 will be added to the end of the name, e.g `focused_turing3` -func GetRandomName(retry int) string { - rnd := random.Rand -begin: - name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) - if name == "boring_wozniak" /* Steve Wozniak is not boring */ { - goto begin - } - - if retry > 0 { - name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) - } - return name -} diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go deleted file mode 100644 index dde5f6603..000000000 --- a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go +++ /dev/null @@ -1,35 +0,0 @@ -package plugingetter - -import "github.com/docker/docker/pkg/plugins" - -const ( - // LOOKUP doesn't update RefCount - LOOKUP = 0 - // ACQUIRE increments RefCount - ACQUIRE = 1 - // RELEASE decrements RefCount - RELEASE = -1 -) - -// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. -type CompatPlugin interface { - Client() *plugins.Client - Name() string - BasePath() string - IsV1() bool -} - -// CountedPlugin is a plugin which is reference counted. -type CountedPlugin interface { - Acquire() - Release() - CompatPlugin -} - -// PluginGetter is the interface implemented by Store -type PluginGetter interface { - Get(name, capability string, mode int) (CompatPlugin, error) - GetAllByCap(capability string) ([]CompatPlugin, error) - GetAllManagedPluginsByCap(capability string) []CompatPlugin - Handle(capability string, callback func(string, *plugins.Client)) -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go deleted file mode 100644 index e8e730eb5..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/client.go +++ /dev/null @@ -1,205 +0,0 @@ -package plugins - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/plugins/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - defaultTimeOut = 30 -) - -func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { - tr := &http.Transport{} - - if tlsConfig != nil { - c, err := tlsconfig.Client(*tlsConfig) - if err != nil { - return nil, err - } - tr.TLSClientConfig = c - } - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - socket := u.Host - if socket == "" { - // valid local socket addresses have the host empty. - socket = u.Path - } - if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { - return nil, err - } - scheme := httpScheme(u) - - return transport.NewHTTPTransport(tr, scheme, socket), nil -} - -// NewClient creates a new plugin client (http).
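//
// Editor's note: the sketch below is illustrative and not part of the original
// file; the socket path and RPC method are assumptions. Call retries with
// exponential backoff (1s, 2s, 4s, ...) for up to defaultTimeOut (30) seconds:
//
//	c, err := plugins.NewClient("unix:///run/docker/plugins/example.sock", nil)
//	if err != nil {
//		return err
//	}
//	var resp struct{ Err string }
//	err = c.Call("VolumeDriver.Create", map[string]string{"Name": "vol1"}, &resp)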
-func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { - clientTransport, err := newTransport(addr, tlsConfig) - if err != nil { - return nil, err - } - return newClientWithTransport(clientTransport, 0), nil -} - -// NewClientWithTimeout creates a new plugin client (http). -func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) { - clientTransport, err := newTransport(addr, tlsConfig) - if err != nil { - return nil, err - } - return newClientWithTransport(clientTransport, timeoutInSecs), nil -} - -// newClientWithTransport creates a new plugin client with a given transport. -func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client { - return &Client{ - http: &http.Client{ - Transport: tr, - Timeout: time.Duration(timeoutInSecs) * time.Second, - }, - requestFactory: tr, - } -} - -// Client represents a plugin client. -type Client struct { - http *http.Client // http client to use - requestFactory transport.RequestFactory -} - -// Call calls the specified method with the specified arguments for the plugin. -// It will retry for 30 seconds if a failure occurs when calling. -func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { - var buf bytes.Buffer - if args != nil { - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return err - } - } - body, err := c.callWithRetry(serviceMethod, &buf, true) - if err != nil { - return err - } - defer body.Close() - if ret != nil { - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - } - return nil -} - -// Stream calls the specified method with the specified arguments for the plugin and returns the response body -func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return nil, err - } - return c.callWithRetry(serviceMethod, &buf, true) -} - -// SendFile calls the specified method, and passes through the IO stream -func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { - body, err := c.callWithRetry(serviceMethod, data, true) - if err != nil { - return err - } - defer body.Close() - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - return nil -} - -func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { - req, err := c.requestFactory.NewRequest(serviceMethod, data) - if err != nil { - return nil, err - } - - var retries int - start := time.Now() - - for { - resp, err := c.http.Do(req) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) - time.Sleep(timeOff) - continue - } - - if resp.StatusCode != http.StatusOK { - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} - } - - // Plugins' Response(s) should have an Err field indicating what went - // wrong. Try to unmarshal into ResponseErr. 
Otherwise fallback to just - // return the string(body) - type responseErr struct { - Err string - } - remoteErr := responseErr{} - if err := json.Unmarshal(b, &remoteErr); err == nil { - if remoteErr.Err != "" { - return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} - } - } - // old way... - return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} - } - return resp.Body, nil - } -} - -func backoff(retries int) time.Duration { - b, max := 1, defaultTimeOut - for b < max && retries > 0 { - b *= 2 - retries-- - } - if b > max { - b = max - } - return time.Duration(b) * time.Second -} - -func abort(start time.Time, timeOff time.Duration) bool { - return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second -} - -func httpScheme(u *url.URL) string { - scheme := u.Scheme - if scheme != "https" { - scheme = "http" - } - return scheme -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go deleted file mode 100644 index e99581c57..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery.go +++ /dev/null @@ -1,131 +0,0 @@ -package plugins - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "strings" - "sync" -) - -var ( - // ErrNotFound plugin not found - ErrNotFound = errors.New("plugin not found") - socketsPath = "/run/docker/plugins" -) - -// localRegistry defines a registry that is local (using unix socket). -type localRegistry struct{} - -func newLocalRegistry() localRegistry { - return localRegistry{} -} - -// Scan scans all the plugin paths and returns all the names it found -func Scan() ([]string, error) { - var names []string - if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return nil - } - - if fi.Mode()&os.ModeSocket != 0 { - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - } - return nil - }); err != nil { - return nil, err - } - - for _, path := range specsPaths { - if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { - if err != nil || fi.IsDir() { - return nil - } - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - return nil - }); err != nil { - return nil, err - } - } - return names, nil -} - -// Plugin returns the plugin registered with the given name (or returns an error). -func (l *localRegistry) Plugin(name string) (*Plugin, error) { - socketpaths := pluginPaths(socketsPath, name, ".sock") - - for _, p := range socketpaths { - if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { - return NewLocalPlugin(name, "unix://"+p), nil - } - } - - var txtspecpaths []string - for _, p := range specsPaths { - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
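	// Editor's note (illustrative, not from the original file): for a plugin
	// named "example" on Linux, the probe order assembled above is:
	//
	//	/run/docker/plugins/example.sock
	//	/run/docker/plugins/example/example.sock
	//	/etc/docker/plugins/example.spec (and example/example.spec, plus .json variants)
	//	/usr/lib/docker/plugins/example.spec (likewise)
	//
	// pluginPaths(base, name, ext) returns both base/name+ext and
	// base/name/name+ext, which is why each directory is probed twice.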
- } - - for _, p := range txtspecpaths { - if _, err := os.Stat(p); err == nil { - if strings.HasSuffix(p, ".json") { - return readPluginJSONInfo(name, p) - } - return readPluginInfo(name, p) - } - } - return nil, ErrNotFound -} - -func readPluginInfo(name, path string) (*Plugin, error) { - content, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - addr := strings.TrimSpace(string(content)) - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - - if len(u.Scheme) == 0 { - return nil, fmt.Errorf("Unknown protocol") - } - - return NewLocalPlugin(name, addr), nil -} - -func readPluginJSONInfo(name, path string) (*Plugin, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var p Plugin - if err := json.NewDecoder(f).Decode(&p); err != nil { - return nil, err - } - p.name = name - if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { - p.TLSConfig.InsecureSkipVerify = true - } - p.activateWait = sync.NewCond(&sync.Mutex{}) - - return &p, nil -} - -func pluginPaths(base, name, ext string) []string { - return []string{ - filepath.Join(base, name+ext), - filepath.Join(base, name, name+ext), - } -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go deleted file mode 100644 index 693a47e39..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package plugins - -var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go deleted file mode 100644 index d7c1fe494..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package plugins - -import ( - "os" - "path/filepath" -) - -var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go deleted file mode 100644 index 798847102..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -package plugins - -import ( - "fmt" - "net/http" -) - -type statusError struct { - status int - method string - err string -} - -// Error returns a formatted string for this error type -func (e *statusError) Error() string { - return fmt.Sprintf("%s: %v", e.method, e.err) -} - -// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin -func IsNotFound(err error) bool { - return isStatusError(err, http.StatusNotFound) -} - -func isStatusError(err error, status int) bool { - if err == nil { - return false - } - e, ok := err.(*statusError) - if !ok { - return false - } - return e.status == status -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go deleted file mode 100644 index 861daa320..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ /dev/null @@ -1,335 +0,0 @@ -// Package plugins provides structures and helper functions to manage Docker -// plugins. -// -// Docker discovers plugins by looking for them in the plugin directory whenever -// a user or container tries to use one by name. 
UNIX domain socket files must -// be located under /run/docker/plugins, whereas spec files can be located -// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled -// by the Registry interface, which lets you list all plugins or get a plugin by -// its name if it exists. -// -// The plugins need to implement an HTTP server and bind this to the UNIX socket -// or the address specified in the spec files. -// A handshake is sent at /Plugin.Activate, and plugins are expected to return -// a Manifest with a list of Docker subsystems which this plugin implements. -// -// In order to use a plugin, you can use ``Get`` with the name of the -// plugin and the subsystem it implements. -// -// plugin, err := plugins.Get("example", "VolumeDriver") -// if err != nil { -// return fmt.Errorf("Error looking up volume plugin example: %v", err) -// } -package plugins - -import ( - "errors" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - // ErrNotImplements is returned if the plugin does not implement the requested driver. - ErrNotImplements = errors.New("Plugin does not implement the requested driver") -) - -type plugins struct { - sync.Mutex - plugins map[string]*Plugin -} - -type extpointHandlers struct { - sync.RWMutex - extpointHandlers map[string][]func(string, *Client) -} - -var ( - storage = plugins{plugins: make(map[string]*Plugin)} - handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} -) - -// Manifest lists what a plugin implements. -type Manifest struct { - // List of subsystems the plugin implements. - Implements []string -} - -// Plugin is the definition of a docker plugin. -type Plugin struct { - // Name of the plugin - name string - // Address of the plugin - Addr string - // TLS configuration of the plugin - TLSConfig *tlsconfig.Options - // Client attached to the plugin - client *Client - // Manifest of the plugin (see above) - Manifest *Manifest `json:"-"` - - // wait for activation to finish - activateWait *sync.Cond - // error produced by activation - activateErr error - // keeps track of callback handlers run against this plugin - handlersRun bool -} - -// BasePath returns the path to which all paths returned by the plugin are relative. -// For v1 plugins, this always returns the host's root directory. -func (p *Plugin) BasePath() string { - return "/" -} - -// Name returns the name of the plugin. -func (p *Plugin) Name() string { - return p.name -} - -// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. -func (p *Plugin) Client() *Client { - return p.client -} - -// IsV1 returns true for V1 plugins and false otherwise. -func (p *Plugin) IsV1() bool { - return true -} - -// NewLocalPlugin creates a new local plugin. -func NewLocalPlugin(name, addr string) *Plugin { - return &Plugin{ - name: name, - Addr: addr, - // TODO: change to nil - TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, - activateWait: sync.NewCond(&sync.Mutex{}), - } -} - -func (p *Plugin) activate() error { - p.activateWait.L.Lock() - - if p.activated() { - p.runHandlers() - p.activateWait.L.Unlock() - return p.activateErr - } - - p.activateErr = p.activateWithLock() - - p.runHandlers() - p.activateWait.L.Unlock() - p.activateWait.Broadcast() - return p.activateErr -} - -// runHandlers runs the registered handlers for the implemented plugin types. -// This should only be run after activation, and while the activation lock is held.
-func (p *Plugin) runHandlers() { - if !p.activated() { - return - } - - handlers.RLock() - if !p.handlersRun { - for _, iface := range p.Manifest.Implements { - hdlrs, handled := handlers.extpointHandlers[iface] - if !handled { - continue - } - for _, handler := range hdlrs { - handler(p.name, p.client) - } - } - p.handlersRun = true - } - handlers.RUnlock() - -} - -// activated returns if the plugin has already been activated. -// This should only be called with the activation lock held -func (p *Plugin) activated() bool { - return p.Manifest != nil -} - -func (p *Plugin) activateWithLock() error { - c, err := NewClient(p.Addr, p.TLSConfig) - if err != nil { - return err - } - p.client = c - - m := new(Manifest) - if err = p.client.Call("Plugin.Activate", nil, m); err != nil { - return err - } - - p.Manifest = m - return nil -} - -func (p *Plugin) waitActive() error { - p.activateWait.L.Lock() - for !p.activated() { - p.activateWait.Wait() - } - p.activateWait.L.Unlock() - return p.activateErr -} - -func (p *Plugin) implements(kind string) bool { - if p.Manifest == nil { - return false - } - for _, driver := range p.Manifest.Implements { - if driver == kind { - return true - } - } - return false -} - -func load(name string) (*Plugin, error) { - return loadWithRetry(name, true) -} - -func loadWithRetry(name string, retry bool) (*Plugin, error) { - registry := newLocalRegistry() - start := time.Now() - - var retries int - for { - pl, err := registry.Plugin(name) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) - time.Sleep(timeOff) - continue - } - - storage.Lock() - if pl, exists := storage.plugins[name]; exists { - storage.Unlock() - return pl, pl.activate() - } - storage.plugins[name] = pl - storage.Unlock() - - err = pl.activate() - - if err != nil { - storage.Lock() - delete(storage.plugins, name) - storage.Unlock() - } - - return pl, err - } -} - -func get(name string) (*Plugin, error) { - storage.Lock() - pl, ok := storage.plugins[name] - storage.Unlock() - if ok { - return pl, pl.activate() - } - return load(name) -} - -// Get returns the plugin given the specified name and requested implementation. -func Get(name, imp string) (*Plugin, error) { - pl, err := get(name) - if err != nil { - return nil, err - } - if err := pl.waitActive(); err == nil && pl.implements(imp) { - logrus.Debugf("%s implements: %s", name, imp) - return pl, nil - } - return nil, ErrNotImplements -} - -// Handle adds the specified function to the extpointHandlers. 
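//
// Editor's sketch (illustrative, not from the original file): registering a
// callback that fires whenever a plugin advertising the "VolumeDriver"
// subsystem is activated:
//
//	plugins.Handle("VolumeDriver", func(name string, c *plugins.Client) {
//		logrus.Debugf("volume plugin %s activated", name)
//	})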
-func Handle(iface string, fn func(string, *Client)) { - handlers.Lock() - hdlrs, ok := handlers.extpointHandlers[iface] - if !ok { - hdlrs = []func(string, *Client){} - } - - hdlrs = append(hdlrs, fn) - handlers.extpointHandlers[iface] = hdlrs - - storage.Lock() - for _, p := range storage.plugins { - p.activateWait.L.Lock() - if p.activated() && p.implements(iface) { - p.handlersRun = false - } - p.activateWait.L.Unlock() - } - storage.Unlock() - - handlers.Unlock() -} - -// GetAll returns all the plugins for the specified implementation -func GetAll(imp string) ([]*Plugin, error) { - pluginNames, err := Scan() - if err != nil { - return nil, err - } - - type plLoad struct { - pl *Plugin - err error - } - - chPl := make(chan *plLoad, len(pluginNames)) - var wg sync.WaitGroup - for _, name := range pluginNames { - storage.Lock() - pl, ok := storage.plugins[name] - storage.Unlock() - if ok { - chPl <- &plLoad{pl, nil} - continue - } - - wg.Add(1) - go func(name string) { - defer wg.Done() - pl, err := loadWithRetry(name, false) - chPl <- &plLoad{pl, err} - }(name) - } - - wg.Wait() - close(chPl) - - var out []*Plugin - for pl := range chPl { - if pl.err != nil { - logrus.Error(pl.err) - continue - } - if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { - out = append(out, pl.pl) - } - } - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go deleted file mode 100644 index 5be146af6..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport - -import ( - "io" - "net/http" -) - -// httpTransport holds an http.RoundTripper -// and information about the scheme and address the transport -// sends request to. -type httpTransport struct { - http.RoundTripper - scheme string - addr string -} - -// NewHTTPTransport creates a new httpTransport. -func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { - return httpTransport{ - RoundTripper: r, - scheme: scheme, - addr: addr, - } -} - -// NewRequest creates a new http.Request and sets the URL -// scheme and address with the transport's fields. -func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { - req, err := newHTTPRequest(path, data) - if err != nil { - return nil, err - } - req.URL.Scheme = t.scheme - req.URL.Host = t.addr - return req, nil -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go deleted file mode 100644 index d7f1e2100..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "strings" -) - -// VersionMimetype is the Content-Type the engine sends to plugins. -const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" - -// RequestFactory defines an interface that -// transports can implement to create new requests. -type RequestFactory interface { - NewRequest(path string, data io.Reader) (*http.Request, error) -} - -// Transport defines an interface that plugin transports -// must implement. -type Transport interface { - http.RoundTripper - RequestFactory -} - -// newHTTPRequest creates a new request with a path and a body. 
-func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - req, err := http.NewRequest("POST", path, data) - if err != nil { - return nil, err - } - req.Header.Add("Accept", VersionMimetype) - return req, nil -} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 5c5aead69..000000000 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -) - -const buffer32K = 32 * 1024 - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. 
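//
// Editor's sketch of the intended Get/Put pairing (illustrative; dst is any
// io.Writer):
//
//	buf := pools.BufioWriter32KPool.Get(dst)
//	defer pools.BufioWriter32KPool.Put(buf)
//	// write through buf, calling buf.Flush() before Put returns it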
-func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.WriteCloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go deleted file mode 100644 index fcf31173c..000000000 --- a/vendor/github.com/docker/docker/pkg/progress/progress.go +++ /dev/null @@ -1,84 +0,0 @@ -package progress - -import ( - "fmt" -) - -// Progress represents the progress of a transfer. -type Progress struct { - ID string - - // Progress contains a Message or... - Message string - - // ...progress of an action - Action string - Current int64 - Total int64 - - // Aux contains extra information not presented to the user, such as - // digests for push signing. - Aux interface{} - - LastUpdate bool -} - -// Output is an interface for writing progress information. It's -// like a writer for progress, but we don't call it Writer because -// that would be confusing next to ProgressReader (also, because it -// doesn't implement the io.Writer interface). -type Output interface { - WriteProgress(Progress) error -} - -type chanOutput chan<- Progress - -func (out chanOutput) WriteProgress(p Progress) error { - out <- p - return nil -} - -// ChanOutput returns an Output that writes progress updates to the -// supplied channel. -func ChanOutput(progressChan chan<- Progress) Output { - return chanOutput(progressChan) -} - -type discardOutput struct{} - -func (discardOutput) WriteProgress(Progress) error { - return nil -} - -// DiscardOutput returns an Output that discards progress. -func DiscardOutput() Output { - return discardOutput{} -} - -// Update is a convenience function to write a progress update to the channel. -func Update(out Output, id, action string) { - out.WriteProgress(Progress{ID: id, Action: action}) -} - -// Updatef is a convenience function to write a printf-formatted progress update -// to the channel. -func Updatef(out Output, id, format string, a ...interface{}) { - Update(out, id, fmt.Sprintf(format, a...)) -} - -// Message is a convenience function to write a progress message to the channel. -func Message(out Output, id, message string) { - out.WriteProgress(Progress{ID: id, Message: message}) -} - -// Messagef is a convenience function to write a printf-formatted progress -// message to the channel. -func Messagef(out Output, id, format string, a ...interface{}) { - Message(out, id, fmt.Sprintf(format, a...)) -} - -// Aux sends auxiliary information over a progress interface, which will not be -// formatted for the UI. This is used for things such as push signing. -func Aux(out Output, a interface{}) { - out.WriteProgress(Progress{Aux: a}) -} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go deleted file mode 100644 index 6b3927eec..000000000 --- a/vendor/github.com/docker/docker/pkg/progress/progressreader.go +++ /dev/null @@ -1,66 +0,0 @@ -package progress - -import ( - "io" - "time" - - "golang.org/x/time/rate" -) - -// Reader is a Reader with progress bar.
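//
// Editor's sketch (illustrative, not from the original file; src is an
// io.ReadCloser and size its expected length): wiring a Reader to a
// channel-backed Output so another goroutine can render the updates:
//
//	ch := make(chan progress.Progress, 100)
//	pr := progress.NewProgressReader(src, progress.ChanOutput(ch), size, "layer1", "Downloading")
//	defer pr.Close()
//	// io.Copy(dst, pr) now emits rate-limited Progress values on ch.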
-type Reader struct { - in io.ReadCloser // Stream to read from - out Output // Where to send progress bar to - size int64 - current int64 - lastUpdate int64 - id string - action string - rateLimiter *rate.Limiter -} - -// NewProgressReader creates a new ProgressReader. -func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { - return &Reader{ - in: in, - out: out, - size: size, - id: id, - action: action, - rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), - } -} - -func (p *Reader) Read(buf []byte) (n int, err error) { - read, err := p.in.Read(buf) - p.current += int64(read) - updateEvery := int64(1024 * 512) //512kB - if p.size > 0 { - // Update progress for every 1% read if 1% < 512kB - if increment := int64(0.01 * float64(p.size)); increment < updateEvery { - updateEvery = increment - } - } - if p.current-p.lastUpdate > updateEvery || err != nil { - p.updateProgress(err != nil && read == 0) - p.lastUpdate = p.current - } - - return read, err -} - -// Close closes the progress reader and its underlying reader. -func (p *Reader) Close() error { - if p.current < p.size { - // print a full progress bar when closing prematurely - p.current = p.size - p.updateProgress(false) - } - return p.in.Close() -} - -func (p *Reader) updateProgress(last bool) { - if last || p.current == p.size || p.rateLimiter.Allow() { - p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) - } -} diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go deleted file mode 100644 index dd52b9082..000000000 --- a/vendor/github.com/docker/docker/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it calls a function in a goroutine, -// and returns a channel which will later return the function's return value. -func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go deleted file mode 100644 index 70de4d130..000000000 --- a/vendor/github.com/docker/docker/pkg/random/random.go +++ /dev/null @@ -1,71 +0,0 @@ -package random - -import ( - cryptorand "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "sync" - "time" -) - -// Rand is a global *rand.Rand instance, initialized with a NewSource() source. -var Rand = rand.New(NewSource()) - -// Reader is a global, shared instance of a pseudorandom bytes generator. -// It doesn't consume entropy. -var Reader io.Reader = &reader{rnd: Rand} - -// copypaste from standard math/rand -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// NewSource returns math/rand.Source safe for concurrent use and initialized -// with current unix-nano timestamp -func NewSource() rand.Source { - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed.
- seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - return &lockedSource{ - src: rand.NewSource(seed), - } -} - -type reader struct { - rnd *rand.Rand -} - -func (r *reader) Read(b []byte) (int, error) { - i := 0 - for { - val := r.rnd.Int63() - for val > 0 { - b[i] = byte(val) - i++ - if i == len(b) { - return i, nil - } - val >>= 8 - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go deleted file mode 100644 index 34ae2a9dc..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux - -package reexec - -import ( - "os/exec" - "syscall" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which has Path as current binary. Also it sets -// SysProcAttr.Pdeathsig to SIGTERM. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGTERM, - }, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go deleted file mode 100644 index 778a720e3..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build freebsd solaris darwin - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go deleted file mode 100644 index 76edd8242..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!darwin - -package reexec - -import ( - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin. -func Command(args ...string) *exec.Cmd { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go deleted file mode 100644 index ca871c422..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe".
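//
// Editor's sketch of the package's re-exec pattern (the registration name is
// an illustrative assumption, not from the original file):
//
//	func init() {
//		reexec.Register("my-setup", func() { /* runs in the re-exec'd child */ })
//	}
//
//	func main() {
//		if reexec.Init() {
//			return // we were the child; the initializer already ran
//		}
//		cmd := reexec.Command("my-setup") // Args[0] selects the initializer
//		_ = cmd.Run()
//	}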
-func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go deleted file mode 100644 index c56671d91..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec.go +++ /dev/null @@ -1,47 +0,0 @@ -package reexec - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var registeredInitializers = make(map[string]func()) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registered under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. -func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - if exists { - initializer() - - return true - } - return false -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go deleted file mode 100644 index 68bb77cf5..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package signal provides helper functions for dealing with signals across -// various operating systems. -package signal - -import ( - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "syscall" -) - -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - handledSigs := []os.Signal{} - for _, s := range SignalMap { - handledSigs = append(handledSigs, s) - } - signal.Notify(sigc, handledSigs...) -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) -} - -// ParseSignal translates a string to a valid syscall signal. -// It returns an error if the signal map doesn't include the given signal. -func ParseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - if s == 0 { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return syscall.Signal(s), nil - } - signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return signal, nil -} - -// ValidSignalForPlatform returns true if a signal is valid on the platform -func ValidSignalForPlatform(sig syscall.Signal) bool { - for _, v := range SignalMap { - if v == sig { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go deleted file mode 100644 index 946de87e9..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go +++ /dev/null @@ -1,41 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of Darwin signals. 
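//
// Editor's note (illustrative): symbolic names resolve through this map via
// ParseSignal, which strips an optional "SIG" prefix; numeric strings are
// converted directly without a map lookup:
//
//	sig, err := signal.ParseSignal("SIGTERM") // syscall.SIGTERM, via SignalMap
//	sig, err = signal.ParseSignal("15")       // syscall.Signal(15)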
-var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUG": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go deleted file mode 100644 index 6b9569bb7..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go +++ /dev/null @@ -1,43 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of FreeBSD signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUF": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "THR": syscall.SIGTHR, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go deleted file mode 100644 index d418cbe9e..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go +++ /dev/null @@ -1,80 +0,0 @@ -package signal - -import ( - "syscall" -) - -const ( - sigrtmin = 34 - sigrtmax = 64 -) - -// SignalMap is a map of Linux signals. 
-var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CLD": syscall.SIGCLD, - "CONT": syscall.SIGCONT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "POLL": syscall.SIGPOLL, - "PROF": syscall.SIGPROF, - "PWR": syscall.SIGPWR, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STKFLT": syscall.SIGSTKFLT, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "UNUSED": syscall.SIGUNUSED, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, - "RTMIN": sigrtmin, - "RTMIN+1": sigrtmin + 1, - "RTMIN+2": sigrtmin + 2, - "RTMIN+3": sigrtmin + 3, - "RTMIN+4": sigrtmin + 4, - "RTMIN+5": sigrtmin + 5, - "RTMIN+6": sigrtmin + 6, - "RTMIN+7": sigrtmin + 7, - "RTMIN+8": sigrtmin + 8, - "RTMIN+9": sigrtmin + 9, - "RTMIN+10": sigrtmin + 10, - "RTMIN+11": sigrtmin + 11, - "RTMIN+12": sigrtmin + 12, - "RTMIN+13": sigrtmin + 13, - "RTMIN+14": sigrtmin + 14, - "RTMIN+15": sigrtmin + 15, - "RTMAX-14": sigrtmax - 14, - "RTMAX-13": sigrtmax - 13, - "RTMAX-12": sigrtmax - 12, - "RTMAX-11": sigrtmax - 11, - "RTMAX-10": sigrtmax - 10, - "RTMAX-9": sigrtmax - 9, - "RTMAX-8": sigrtmax - 8, - "RTMAX-7": sigrtmax - 7, - "RTMAX-6": sigrtmax - 6, - "RTMAX-5": sigrtmax - 5, - "RTMAX-4": sigrtmax - 4, - "RTMAX-3": sigrtmax - 3, - "RTMAX-2": sigrtmax - 2, - "RTMAX-1": sigrtmax - 1, - "RTMAX": sigrtmax, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go deleted file mode 100644 index 89576b9e3..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go +++ /dev/null @@ -1,42 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of Solaris signals. 
-// SIGINFO and SIGTHR not defined for Solaris -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUF": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go deleted file mode 100644 index 5d058fd56..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package signal - -import ( - "syscall" -) - -// Signals used in cli/command (no windows equivalent, use -// invalid signals so they don't get handled) - -const ( - // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. - SIGCHLD = syscall.SIGCHLD - // SIGWINCH is a signal sent to a process when its controlling terminal changes its size - SIGWINCH = syscall.SIGWINCH - // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading - SIGPIPE = syscall.SIGPIPE - // DefaultStopSignal is the syscall signal used to stop a container in unix systems. - DefaultStopSignal = "SIGTERM" -) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go deleted file mode 100644 index c592d37df..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows,!solaris - -package signal - -import ( - "syscall" -) - -// SignalMap is an empty map of signals for unsupported platform. -var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go deleted file mode 100644 index 440f2700e..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build windows - -package signal - -import ( - "syscall" -) - -// Signals used in cli/command (no windows equivalent, use -// invalid signals so they don't get handled) -const ( - SIGCHLD = syscall.Signal(0xff) - SIGWINCH = syscall.Signal(0xff) - SIGPIPE = syscall.Signal(0xff) - // DefaultStopSignal is the syscall signal used to stop a container in windows systems. - DefaultStopSignal = "15" -) - -// SignalMap is a map of "supported" signals. As per the comment in GOLang's -// ztypes_windows.go: "More invented values for signals". Windows doesn't -// really support signals in any way, shape or form that Unix does. -// -// We have these so that docker kill can be used to gracefully (TERM) and -// forcibly (KILL) terminate a container on Windows. 
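Every signal_*.go in this sweep fills the same map[string]syscall.Signal, and the ParseSignal helper deleted earlier in the hunk resolves either a numeric string or a name with an optional SIG prefix against it. A rough standalone equivalent, trimmed to a handful of entries for brevity:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    	"syscall"
    )

    var signalMap = map[string]syscall.Signal{
    	"HUP":  syscall.SIGHUP,
    	"INT":  syscall.SIGINT,
    	"TERM": syscall.SIGTERM,
    	"KILL": syscall.SIGKILL,
    }

    // parseSignal mirrors the deleted ParseSignal: numeric strings are used
    // directly (0 is rejected), otherwise the name is upper-cased and a
    // leading "SIG" stripped before the map lookup.
    func parseSignal(raw string) (syscall.Signal, error) {
    	if n, err := strconv.Atoi(raw); err == nil {
    		if n == 0 {
    			return -1, fmt.Errorf("invalid signal: %s", raw)
    		}
    		return syscall.Signal(n), nil
    	}
    	sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(raw), "SIG")]
    	if !ok {
    		return -1, fmt.Errorf("invalid signal: %s", raw)
    	}
    	return sig, nil
    }

    func main() {
    	for _, s := range []string{"sigterm", "9", "TERM", "bogus"} {
    		sig, err := parseSignal(s)
    		fmt.Println(s, "->", sig, err)
    	}
    }
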
-var SignalMap = map[string]syscall.Signal{ - "KILL": syscall.SIGKILL, - "TERM": syscall.SIGTERM, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go deleted file mode 100644 index 638a1ab66..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/trap.go +++ /dev/null @@ -1,103 +0,0 @@ -package signal - -import ( - "fmt" - "os" - gosignal "os/signal" - "path/filepath" - "runtime" - "strings" - "sync/atomic" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/pkg/errors" -) - -// Trap sets up a simplified signal "trap", appropriate for common -// behavior expected from a vanilla unix command-line tool in general -// (and the Docker engine in particular). -// -// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. -// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is -// skipped and the process is terminated immediately (allows force quit of stuck daemon) -// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. -// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while -// the docker daemon is not restarted and also running under systemd. -// Fixes https://github.com/docker/docker/issues/19728 -// -func Trap(cleanup func()) { - c := make(chan os.Signal, 1) - // we will handle INT, TERM, QUIT, SIGPIPE here - signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} - gosignal.Notify(c, signals...) - go func() { - interruptCount := uint32(0) - for sig := range c { - if sig == syscall.SIGPIPE { - continue - } - - go func(sig os.Signal) { - logrus.Infof("Processing signal '%v'", sig) - switch sig { - case os.Interrupt, syscall.SIGTERM: - if atomic.LoadUint32(&interruptCount) < 3 { - // Initiate the cleanup only once - if atomic.AddUint32(&interruptCount, 1) == 1 { - // Call the provided cleanup handler - cleanup() - os.Exit(0) - } else { - return - } - } else { - // 3 SIGTERM/INT signals received; force exit without cleanup - logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") - } - case syscall.SIGQUIT: - DumpStacks("") - logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") - } - //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # - os.Exit(128 + int(sig.(syscall.Signal))) - }(sig) - } - }() -} - -const stacksLogNameTemplate = "goroutine-stacks-%s.log" - -// DumpStacks appends the runtime stack into file in dir and returns full path -// to that file. 
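The trap.go just removed escalates: the first SIGINT/SIGTERM triggers cleanup and a clean exit, and the third forces an immediate exit with status 128 + signal number so a daemon stuck in cleanup can still be killed. The skeleton of that behavior, without the Docker-specific logging and SIGQUIT stack dumping:

    package main

    import (
    	"fmt"
    	"os"
    	"os/signal"
    	"sync/atomic"
    	"syscall"
    	"time"
    )

    func trap(cleanup func()) {
    	c := make(chan os.Signal, 1)
    	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
    	go func() {
    		var count uint32
    		for sig := range c {
    			switch n := atomic.AddUint32(&count, 1); {
    			case n == 1:
    				// First signal: run cleanup once, then exit cleanly.
    				go func() { cleanup(); os.Exit(0) }()
    			case n >= 3:
    				// Third signal: cleanup is presumably stuck; force
    				// exit with the conventional 128 + signal number.
    				os.Exit(128 + int(sig.(syscall.Signal)))
    			}
    		}
    	}()
    }

    func main() {
    	trap(func() { fmt.Println("cleaning up...") })
    	time.Sleep(time.Minute) // stand-in for the daemon's real work
    }
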
-func DumpStacks(dir string) (string, error) { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - var f *os.File - if dir != "" { - path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) - var err error - f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) - if err != nil { - return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") - } - defer f.Close() - defer f.Sync() - } else { - f = os.Stderr - } - if _, err := f.Write(buf); err != nil { - return "", errors.Wrap(err, "failed to write goroutine stacks") - } - return f.Name(), nil -} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go deleted file mode 100644 index be2076545..000000000 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,174 +0,0 @@ -package stdcopy - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" -) - -// StdType is the type of standard stream -// a writer can multiplex to. -type StdType byte - -const ( - // Stdin represents standard input stream type. - Stdin StdType = iota - // Stdout represents standard output stream type. - Stdout - // Stderr represents standard error steam type. - Stderr - - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 -) - -var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} - -// stdWriter is wrapper of io.Writer with extra customized info. -type stdWriter struct { - io.Writer - prefix byte -} - -// Write sends the buffer to the underneath writer. -// It inserts the prefix header before the buffer, -// so stdcopy.StdCopy knows where to multiplex the output. -// It makes stdWriter to implement io.Writer. -func (w *stdWriter) Write(p []byte) (n int, err error) { - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - if p == nil { - return 0, nil - } - - header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} - binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) - buf := bufPool.Get().(*bytes.Buffer) - buf.Write(header[:]) - buf.Write(p) - - n, err = w.Writer.Write(buf.Bytes()) - n -= stdWriterPrefixLen - if n < 0 { - n = 0 - } - - buf.Reset() - bufPool.Put(buf) - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) io.Writer { - return &stdWriter{ - Writer: w, - prefix: byte(t), - } -} - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non nil, it indicates a real underlying error. 
-// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. -func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - // Check the first byte to know where to write - switch StdType(buf[stdWriterFdIndex]) { - case Stdin: - fallthrough - case Stdout: - // Write on stdout - out = dstout - case Stderr: - // Write on stderr - out = dsterr - default: - return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. - if frameSize+stdWriterPrefixLen > bufLen { - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) - bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) - if ew != nil { - return 0, ew - } - // If the frame has not been fully written: error - if nw != frameSize { - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go deleted file mode 100644 index ce6ea79de..000000000 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package streamformatter provides helper functions to format a stream. -package streamformatter - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" -) - -// StreamFormatter formats a stream, optionally using JSON. -type StreamFormatter struct { - json bool -} - -// NewStreamFormatter returns a simple StreamFormatter -func NewStreamFormatter() *StreamFormatter { - return &StreamFormatter{} -} - -// NewJSONStreamFormatter returns a StreamFormatter configured to stream json -func NewJSONStreamFormatter() *StreamFormatter { - return &StreamFormatter{true} -} - -const streamNewline = "\r\n" - -var streamNewlineBytes = []byte(streamNewline) - -// FormatStream formats the specified stream. -func (sf *StreamFormatter) FormatStream(str string) []byte { - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + "\r") -} - -// FormatStatus formats the specified objects according to the specified format (and id). 
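The stdcopy protocol deleted here frames every write with an 8-byte header: byte 0 carries the stream id (0 stdin, 1 stdout, 2 stderr), bytes 4-7 a big-endian uint32 payload length, and StdCopy reads header after header to fan frames out to the right writer. Encoding and decoding one frame by hand makes the wire format concrete (a standalone sketch, not the vendored API):

    package main

    import (
    	"bytes"
    	"encoding/binary"
    	"fmt"
    	"io"
    )

    const headerLen = 8

    // writeFrame prepends the stdcopy-style header to one payload.
    func writeFrame(w io.Writer, stream byte, p []byte) error {
    	var hdr [headerLen]byte
    	hdr[0] = stream // 0=stdin, 1=stdout, 2=stderr
    	binary.BigEndian.PutUint32(hdr[4:], uint32(len(p)))
    	if _, err := w.Write(hdr[:]); err != nil {
    		return err
    	}
    	_, err := w.Write(p)
    	return err
    }

    func main() {
    	var mux bytes.Buffer
    	writeFrame(&mux, 1, []byte("hello from stdout\n"))
    	writeFrame(&mux, 2, []byte("hello from stderr\n"))

    	// Demultiplex: read a header, then exactly that many payload bytes.
    	for {
    		var hdr [headerLen]byte
    		if _, err := io.ReadFull(&mux, hdr[:]); err != nil {
    			break // io.EOF: clean end of the stream
    		}
    		payload := make([]byte, binary.BigEndian.Uint32(hdr[4:]))
    		io.ReadFull(&mux, payload)
    		fmt.Printf("stream %d: %s", hdr[0], payload)
    	}
    }
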
-func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { - str := fmt.Sprintf(format, a...) - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + streamNewline) -} - -// FormatError formats the specified error. -func (sf *StreamFormatter) FormatError(err error) []byte { - if sf.json { - jsonError, ok := err.(*jsonmessage.JSONError) - if !ok { - jsonError = &jsonmessage.JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return append(b, streamNewlineBytes...) - } - return []byte("{\"error\":\"format error\"}" + streamNewline) - } - return []byte("Error: " + err.Error() + streamNewline) -} - -// FormatProgress formats the progress information for a specified action. -func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { - if progress == nil { - progress = &jsonmessage.JSONProgress{} - } - if sf.json { - var auxJSON *json.RawMessage - if aux != nil { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return nil - } - auxJSON = new(json.RawMessage) - *auxJSON = auxJSONBytes - } - b, err := json.Marshal(&jsonmessage.JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - Aux: auxJSON, - }) - if err != nil { - return nil - } - return append(b, streamNewlineBytes...) - } - endl := "\r" - if progress.String() == "" { - endl += "\n" - } - return []byte(action + " " + progress.String() + endl) -} - -// NewProgressOutput returns a progress.Output object that can be passed to -// progress.NewProgressReader. -func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { - return &progressOutput{ - sf: sf, - out: out, - newLines: newLines, - } -} - -type progressOutput struct { - sf *StreamFormatter - out io.Writer - newLines bool -} - -// WriteProgress formats progress information from a ProgressReader. -func (out *progressOutput) WriteProgress(prog progress.Progress) error { - var formatted []byte - if prog.Message != "" { - formatted = out.sf.FormatStatus(prog.ID, prog.Message) - } else { - jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} - formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) - } - _, err := out.out.Write(formatted) - if err != nil { - return err - } - - if out.newLines && prog.LastUpdate { - _, err = out.out.Write(out.sf.FormatStatus("", "")) - return err - } - - return nil -} - -// StdoutFormatter is a streamFormatter that writes to the standard output. -type StdoutFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StdoutFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -// StderrFormatter is a streamFormatter that writes to the standard error. 
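In JSON mode each of these formatters emits exactly one jsonmessage object per record, terminated by "\r\n", which is what lets clients decode build and pull progress as a stream. A stripped-down sketch of that framing, with a local message type standing in for jsonmessage.JSONMessage:

    package main

    import (
    	"encoding/json"
    	"os"
    )

    type message struct {
    	ID     string `json:"id,omitempty"`
    	Status string `json:"status,omitempty"`
    	Error  string `json:"error,omitempty"`
    }

    // formatStatus marshals one message and appends the "\r\n" record
    // separator, mirroring FormatStatus in JSON mode.
    func formatStatus(m message) []byte {
    	b, err := json.Marshal(m)
    	if err != nil {
    		return []byte(`{"error":"format error"}` + "\r\n")
    	}
    	return append(b, "\r\n"...)
    }

    func main() {
    	os.Stdout.Write(formatStatus(message{ID: "layer1", Status: "Downloading"}))
    	os.Stdout.Write(formatStatus(message{ID: "layer1", Status: "Complete"}))
    }
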
-type StderrFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StderrFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go deleted file mode 100644 index fa35d8bad..000000000 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid - -import ( - "crypto/rand" - "encoding/hex" - "io" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/pkg/random" -) - -const shortLen = 12 - -var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") - -// IsShortID determines if an arbitrary string *looks like* a short ID. -func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - if len(id) > shortLen { - id = id[:shortLen] - } - return id -} - -func generateID(crypto bool) string { - b := make([]byte, 32) - r := random.Reader - if crypto { - r = rand.Reader - } - for { - if _, err := io.ReadFull(r, b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// GenerateRandomID returns a unique id. -func GenerateRandomID() string { - return generateID(true) -} - -// GenerateNonCryptoID generates unique id without using cryptographically -// secure sources of random. -// It helps you to save entropy. -func GenerateNonCryptoID() string { - return generateID(false) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE deleted file mode 100644 index 34c4ea7c5..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD deleted file mode 100644 index 9b4f4a294..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go deleted file mode 100644 index f6bc2231f..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -// This code is a modified version of path/filepath/symlink.go from the Go standard library. - -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an -// absolute path. This function handles paths in a platform-agnostic manner. -func FollowSymlinkInScope(path, root string) (string, error) { - path, err := filepath.Abs(filepath.FromSlash(path)) - if err != nil { - return "", err - } - root, err = filepath.Abs(filepath.FromSlash(root)) - if err != nil { - return "", err - } - return evalSymlinksInScope(path, root) -} - -// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return -// a result guaranteed to be contained within the scope `root`, at the time of the call. -// Symlinks in `root` are not evaluated and left as-is. -// Errors encountered while attempting to evaluate symlinks in path will be returned. -// Non-existing paths are valid and do not constitute an error. -// `path` has to contain `root` as a prefix, or else an error will be returned. -// Trying to break out from `root` does not constitute an error. -// -// Example: -// If /foo/bar -> /outside, -// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" -// -// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks -// are created and not to create subsequently, additional symlinks that could potentially make a -// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") -// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should -// no longer be considered safely contained in "/foo". 
-func evalSymlinksInScope(path, root string) (string, error) { - root = filepath.Clean(root) - if path == root { - return path, nil - } - if !strings.HasPrefix(path, root) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - const maxIter = 255 - originalPath := path - // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" - path = path[len(root):] - if root == string(filepath.Separator) { - path = string(filepath.Separator) + path - } - if !strings.HasPrefix(path, string(filepath.Separator)) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - path = filepath.Clean(path) - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - // b here will always be considered to be the "current absolute path inside - // root" when we append paths to it, we also append a slash and use - // filepath.Clean after the loop to trim the trailing slash - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) - } - - // find next path component, p - i := strings.IndexRune(path, filepath.Separator) - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - continue - } - - // this takes a b.String() like "b/../" and a p like "c" and turns it - // into "/b/../c" which then gets filepath.Cleaned into "/c" and then - // root gets prepended and we Clean again (to remove any trailing slash - // if the first Clean gave us just "/") - cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) - if isDriveOrRoot(cleanP) { - // never Lstat "/" itself, or drive letters on Windows - b.Reset() - continue - } - fullP := filepath.Clean(root + cleanP) - - fi, err := os.Lstat(fullP) - if os.IsNotExist(err) { - // if p does not exist, accept it - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(fullP) - if err != nil { - return "", err - } - if system.IsAbs(dest) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - - // see note above on "fullP := ..." for why this is double-cleaned and - // what's happening here - return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil -} - -// EvalSymlinks returns the path name after the evaluation of any symbolic -// links. -// If path is relative the result will be relative to the current directory, -// unless one of the components is an absolute symbolic link. -// This version has been updated to support long paths prepended with `\\?\`. 
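What evalSymlinksInScope buys over the standard library is the containment property described above: an absolute link target resets the accumulated path but is re-rooted under root, so /foo/bar -> /outside resolves to /foo/outside. The sketch below shows only the unscoped stdlib behavior it guards against, on a Unix host; the scoped variant is the function being removed here:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"
    	"path/filepath"
    )

    func main() {
    	root, _ := ioutil.TempDir("", "scope")
    	outside, _ := ioutil.TempDir("", "outside")
    	defer os.RemoveAll(root)
    	defer os.RemoveAll(outside)

    	// root/bar is a symlink pointing outside the scope.
    	link := filepath.Join(root, "bar")
    	if err := os.Symlink(outside, link); err != nil {
    		panic(err)
    	}

    	// The standard library happily escapes root...
    	resolved, _ := filepath.EvalSymlinks(link)
    	fmt.Println("unscoped:", resolved) // a path outside root

    	// ...whereas FollowSymlinkInScope(link, root) keeps the result
    	// under root by re-rooting the absolute link target.
    }
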
-func EvalSymlinks(path string) (string, error) { - return evalSymlinks(path) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go deleted file mode 100644 index 22708273d..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package symlink - -import ( - "path/filepath" -) - -func evalSymlinks(path string) (string, error) { - return filepath.EvalSymlinks(path) -} - -func isDriveOrRoot(p string) bool { - return p == string(filepath.Separator) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go deleted file mode 100644 index 241e531f9..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go +++ /dev/null @@ -1,169 +0,0 @@ -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/longpath" -) - -func toShort(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetShortPathName says we can reuse buffer - n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { - return "", err - } - } - return syscall.UTF16ToString(b), nil -} - -func toLong(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - b = b[:n] - return syscall.UTF16ToString(b), nil -} - -func evalSymlinks(path string) (string, error) { - path, err := walkSymlinks(path) - if err != nil { - return "", err - } - - p, err := toShort(path) - if err != nil { - return "", err - } - p, err = toLong(p) - if err != nil { - return "", err - } - // syscall.GetLongPathName does not change the case of the drive letter, - // but the result of EvalSymlinks must be unique, so we have - // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). - // Make drive letter upper case. - if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { - p = string(p[0]+'A'-'a') + p[1:] - } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { - p = p[:3] + string(p[4]+'A'-'a') + p[5:] - } - return filepath.Clean(p), nil -} - -const utf8RuneSelf = 0x80 - -func walkSymlinks(path string) (string, error) { - const maxIter = 255 - originalPath := path - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("EvalSymlinks: too many links in " + originalPath) - } - - // A path beginning with `\\?\` represents the root, so automatically - // skip that part and begin processing the next segment. 
- if strings.HasPrefix(path, longpath.Prefix) { - b.WriteString(longpath.Prefix) - path = path[4:] - continue - } - - // find next path component, p - var i = -1 - for j, c := range path { - if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { - i = j - break - } - } - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - if b.Len() == 0 { - // must be absolute path - b.WriteRune(filepath.Separator) - } - continue - } - - // If this is the first segment after the long path prefix, accept the - // current segment as a volume root or UNC share and move on to the next. - if b.String() == longpath.Prefix { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - fi, err := os.Lstat(b.String() + p) - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { - b.WriteRune(filepath.Separator) - } - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(b.String() + p) - if err != nil { - return "", err - } - if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - return filepath.Clean(b.String()), nil -} - -func isDriveOrRoot(p string) bool { - if p == string(filepath.Separator) { - return true - } - - length := len(p) - if length >= 2 { - if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go deleted file mode 100644 index 7637f12e1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ /dev/null @@ -1,52 +0,0 @@ -package system - -import ( - "os" - "syscall" - "time" - "unsafe" -) - -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. - if err := setCTime(name, mtime); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go deleted file mode 100644 index 09d58bcbf..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -import ( - "time" -) - -//setCTime will set the create time on a file. On Unix, the create -//time is updated as a side effect of setting the modified time, so -//no action is required. 
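Chtimes, part of the same removal, exists because os.Chtimes misbehaves on timestamps the platform timespec cannot hold: anything before the epoch or after the platform maximum (time.Unix(0, 1<<63-1) with 64-bit nanoseconds, time.Unix(1<<31-1, 0) with a 32-bit timespec) is clamped to the epoch. The clamp in isolation:

    package main

    import (
    	"fmt"
    	"os"
    	"syscall"
    	"time"
    	"unsafe"
    )

    var maxTime = func() time.Time {
    	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
    		return time.Unix(0, 1<<63-1) // 64-bit timespec
    	}
    	return time.Unix(1<<31-1, 0) // 32-bit timespec
    }()

    // clamp returns t unless it falls outside what os.Chtimes can
    // represent on this platform, in which case it returns the epoch.
    func clamp(t time.Time) time.Time {
    	if t.Before(time.Unix(0, 0)) || t.After(maxTime) {
    		return time.Unix(0, 0)
    	}
    	return t
    }

    func main() {
    	f, _ := os.Create("demo.txt")
    	f.Close()
    	defer os.Remove("demo.txt")

    	when := clamp(time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC)) // pre-epoch
    	fmt.Println(os.Chtimes("demo.txt", when, when))            // <nil>
    }
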
-func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go deleted file mode 100644 index 294586846..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build windows - -package system - -import ( - "syscall" - "time" -) - -//setCTime will set the create time on a file. On Windows, this requires -//calling SetFileTime and explicitly including the create time. -func setCTime(path string, ctime time.Time) error { - ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) - pathp, e := syscall.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := syscall.CreateFile(pathp, - syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, - syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer syscall.Close(h) - c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) - return syscall.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 288318985..000000000 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 3ec6d2215..000000000 --- a/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,85 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. -func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. 
-func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go deleted file mode 100644 index 60f0514b1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. -func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. - // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index 810c79478..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !windows - -package system - -import ( - "os" - "path/filepath" -) - -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return MkdirAll(path, perm) -} - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -// The functions below here are wrappers for the equivalents in the os package. -// They are passthrough on Unix platforms, and only relevant on Windows. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. 
If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 6094f01fd..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,236 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "syscall" - "unsafe" - - winio "github.com/Microsoft/go-winio" -) - -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) -} - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is largely copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = mkdirall(path[0:j-1], false) - if err != nil { - return err - } - } - - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) - } else { - err = os.Mkdir(path, 0) - } - - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// mkdirWithACL creates a new directory. If there is an error, it will be of -// type *PathError. . -// -// This is a modified and combined version of os.Mkdir and syscall.Mkdir -// in golang to cater for creating a directory am ACL permitting full -// access, with inheritance, to any subfolder/file for Built-in Administrators -// and Local System. 
-func mkdirWithACL(name string) error { - sa := syscall.SecurityAttributes{Length: 0} - sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - sd, err := winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - - namep, err := syscall.UTF16PtrFromString(name) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - - e := syscall.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} - -// The origin of the functions below here are the golang OS and syscall packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
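Returning briefly to the exitcode.go removal above: unwrapping *exec.ExitError down to syscall.WaitStatus is the portable-Unix way to recover a child's exit status. A minimal sketch, assuming a Unix system where the `false` utility exists:

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// exitCode mirrors the removed GetExitCode: unwrap *exec.ExitError,
// then read the platform WaitStatus.
func exitCode(err error) (int, error) {
	if exiterr, ok := err.(*exec.ExitError); ok {
		if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
			return status.ExitStatus(), nil
		}
	}
	return 0, fmt.Errorf("failed to get exit code")
}

func main() {
	err := exec.Command("false").Run()
	code, cerr := exitCode(err)
	fmt.Println(code, cerr) // typically "1 <nil>"
}
```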
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := syscallOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *syscall.SecurityAttributes { - var sa syscall.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) { - if len(path) == 0 { - return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND - } - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return syscall.InvalidHandle, err - } - var access uint32 - switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { - case syscall.O_RDONLY: - access = syscall.GENERIC_READ - case syscall.O_WRONLY: - access = syscall.GENERIC_WRITE - case syscall.O_RDWR: - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - } - if mode&syscall.O_CREAT != 0 { - access |= syscall.GENERIC_WRITE - } - if mode&syscall.O_APPEND != 0 { - access &^= syscall.GENERIC_WRITE - access |= syscall.FILE_APPEND_DATA - } - sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) - var sa *syscall.SecurityAttributes - if mode&syscall.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): - createmode = syscall.CREATE_NEW - case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): - createmode = syscall.CREATE_ALWAYS - case mode&syscall.O_CREAT == syscall.O_CREAT: - createmode = syscall.OPEN_ALWAYS - case mode&syscall.O_TRUNC == syscall.O_TRUNC: - createmode = syscall.TRUNCATE_EXISTING - default: - createmode = syscall.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go deleted file mode 100644 index bd23c4d50..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 49e87eb40..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. -func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e6..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index 385f1d5e7..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,65 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/go-units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. 
- if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go deleted file mode 100644 index 7f4f84f73..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build solaris,cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo LDFLAGS: -lkstat -// #include -// #include -// #include -// #include -// #include -// #include -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t *ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
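For reference, the /proc/meminfo parsing deleted above (meminfo_linux.go) reduces to strings.Fields plus strconv.Atoi per line, skipping malformed entries. A self-contained sketch fed from a string so it runs anywhere:

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	sample := "MemTotal:  2048 kB\nBogus line\nSwapFree:  512 kB\n"
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) < 3 || parts[2] != "kB" {
			continue // sanity check: skip malformed entries
		}
		kb, err := strconv.Atoi(parts[1])
		if err != nil {
			continue
		}
		fmt.Printf("%s %d bytes\n", strings.TrimSuffix(parts[0], ":"), int64(kb)*1024)
	}
}
```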
-func ReadMemInfo() (*MemInfo, error) { - - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("error getting system memory info %v\n", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 3ce019dff..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!windows,!solaris - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index 883944a4c..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package system - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index 73958182b..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c021..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go deleted file mode 100644 index c607c4db0..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go deleted file mode 100644 index cbfe2c157..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. 
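The Mkdev helper removed above is pure bit-packing; a worked example of the layout it documents (low 8 bits of the minor, 12 bits of the major, then the next 12 bits of the minor):

```go
package main

import "fmt"

func mkdev(major, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func main() {
	// major 8, minor 1 is conventionally /dev/sda1 on Linux
	fmt.Printf("0x%08x\n", mkdev(8, 1)) // 0x00000801
}
```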
-// This is used, for example, when validating a user provided path in docker cp.
-// If a drive letter is supplied, it must be the system drive. The drive letter
-// is always removed. Also, it translates it to OS semantics (IOW / to \). We
-// need the path in this syntax so that it can ultimately be concatenated with
-// a Windows long-path which doesn't support drive-letters. Examples:
-// C: --> Fail
-// C:\ --> \
-// a --> a
-// /a --> \a
-// d:\ --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
-	if len(path) == 2 && string(path[1]) == ":" {
-		return "", fmt.Errorf("No relative path specified in %q", path)
-	}
-	if !filepath.IsAbs(path) || len(path) < 2 {
-		return filepath.FromSlash(path), nil
-	}
-	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
-		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
-	}
-	return filepath.FromSlash(path[2:]), nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go
deleted file mode 100644
index 087034c5e..000000000
--- a/vendor/github.com/docker/docker/pkg/system/stat.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// +build !windows
-
-package system
-
-import (
-	"syscall"
-)
-
-// StatT type contains status of a file. It contains metadata
-// like permission, owner, group, size, etc about a file.
-type StatT struct {
-	mode uint32
-	uid  uint32
-	gid  uint32
-	rdev uint64
-	size int64
-	mtim syscall.Timespec
-}
-
-// Mode returns file's permission mode.
-func (s StatT) Mode() uint32 {
-	return s.mode
-}
-
-// UID returns file's user id of owner.
-func (s StatT) UID() uint32 {
-	return s.uid
-}
-
-// GID returns file's group id of owner.
-func (s StatT) GID() uint32 {
-	return s.gid
-}
-
-// Rdev returns file's device ID (if it's special file).
-func (s StatT) Rdev() uint64 {
-	return s.rdev
-}
-
-// Size returns file's size.
-func (s StatT) Size() int64 {
-	return s.size
-}
-
-// Mtim returns file's last modification time.
-func (s StatT) Mtim() syscall.Timespec {
-	return s.mtim
-}
-
-// GetLastModification returns file's last modification time.
-func (s StatT) GetLastModification() syscall.Timespec {
-	return s.Mtim()
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
deleted file mode 100644
index f0742f59e..000000000
--- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package system
-
-import (
-	"syscall"
-)
-
-// fromStatT creates a system.StatT type from a syscall.Stat_t type
-func fromStatT(s *syscall.Stat_t) (*StatT, error) {
-	return &StatT{size: s.Size,
-		mode: uint32(s.Mode),
-		uid:  s.Uid,
-		gid:  s.Gid,
-		rdev: uint64(s.Rdev),
-		mtim: s.Mtimespec}, nil
-}
-
-// FromStatT loads a system.StatT from a syscall.Stat_t.
-func FromStatT(s *syscall.Stat_t) (*StatT, error) {
-	return fromStatT(s)
-}
-
-// Stat takes a path to a file and returns
-// a system.StatT type pertaining to that file.
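The drive-letter rules tabulated above can be exercised with a portable sketch; the "/" to "\" translation is hard-coded here (instead of filepath.FromSlash) so the output is the same on any GOOS:

```go
package main

import (
	"fmt"
	"strings"
)

func checkSystemDrive(path string) (string, error) {
	if len(path) == 2 && path[1] == ':' {
		return "", fmt.Errorf("no relative path specified in %q", path)
	}
	if len(path) >= 2 && path[1] == ':' {
		if !strings.EqualFold(path[:1], "c") {
			return "", fmt.Errorf("the specified path is not on the system drive (C:)")
		}
		path = path[2:] // strip the accepted drive letter
	}
	return strings.Replace(path, "/", `\`, -1), nil
}

func main() {
	for _, p := range []string{"C:", `C:\`, "a", "/a", `d:\`} {
		out, err := checkSystemDrive(p)
		fmt.Printf("%-4s -> %q err=%v\n", p, out, err)
	}
}
```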
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go deleted file mode 100644 index d0fb6f151..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 8b1eded13..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT exists only on linux, and loads a system.StatT from a -// syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go deleted file mode 100644 index 3c3b71fb2..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go deleted file mode 100644 index 0216985a2..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build solaris - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} - -// FromStatT loads a system.StatT from a syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go deleted file mode 100644 index 5d85f523c..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 39490c625..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. -func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go deleted file mode 100644 index 3ae912846..000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux freebsd - -package system - -import "syscall" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return syscall.Unmount(dest, 0) -} - -// CommandLineToArgv should not be used on Unix. -// It simply returns commandLine in the only element in the returned array. 
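The per-platform Stat wrappers deleted above all reduce to syscall.Stat filling a syscall.Stat_t; direct, Unix-only use looks like the sketch below (/etc/hosts is just an illustrative path):

```go
package main

import (
	"fmt"
	"log"
	"syscall"
)

func main() {
	var st syscall.Stat_t
	if err := syscall.Stat("/etc/hosts", &st); err != nil {
		log.Fatal(err)
	}
	// Field names match the Stat_t of the build platform.
	fmt.Printf("size=%d uid=%d mode=%o\n", st.Size, st.Uid, st.Mode&0777)
}
```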
-func CommandLineToArgv(commandLine string) ([]string, error) { - return []string{commandLine}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index 1f311874f..000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,105 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" -) - -var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = syscall.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -// IsWindowsClient returns true if the SKU is client -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} - -// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. -func CommandLineToArgv(commandLine string) ([]string, error) { - var argc int32 - - argsPtr, err := syscall.UTF16PtrFromString(commandLine) - if err != nil { - return nil, err - } - - argv, err := syscall.CommandLineToArgv(argsPtr, &argc) - if err != nil { - return nil, err - } - defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) - - newArgs := make([]string, argc) - for i, v := range (*argv)[:argc] { - newArgs[i] = string(syscall.UTF16ToString((*v)[:])) - } - - return newArgs, nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. 
-	return ntuserApiset.Load() == nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
deleted file mode 100644
index 3d0146b01..000000000
--- a/vendor/github.com/docker/docker/pkg/system/umask.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !windows
-
-package system
-
-import (
-	"syscall"
-)
-
-// Umask sets current process's file mode creation mask to newmask
-// and returns oldmask.
-func Umask(newmask int) (oldmask int, err error) {
-	return syscall.Umask(newmask), nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
deleted file mode 100644
index 13f1de176..000000000
--- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build windows
-
-package system
-
-// Umask is not supported on the windows platform.
-func Umask(newmask int) (oldmask int, err error) {
-	// should not be called on cli code path
-	return 0, ErrNotSupportedPlatform
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
deleted file mode 100644
index e2eac3b55..000000000
--- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package system
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-// LUtimesNano is used to change access and modification time of the specified path.
-// It's used for symlink files because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-	var _path *byte
-	_path, err := syscall.BytePtrFromString(path)
-	if err != nil {
-		return err
-	}
-
-	if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
deleted file mode 100644
index fc8a1aba9..000000000
--- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package system
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-// LUtimesNano is used to change access and modification time of the specified path.
-// It's used for symlink files because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-	// These are not currently available in syscall
-	atFdCwd := -100
-	atSymLinkNoFollow := 0x100
-
-	var _path *byte
-	_path, err := syscall.BytePtrFromString(path)
-	if err != nil {
-		return err
-	}
-
-	if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
deleted file mode 100644
index 139714544..000000000
--- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !linux,!freebsd
-
-package system
-
-import "syscall"
-
-// LUtimesNano is only supported on linux and freebsd.
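The hand-rolled utimensat(2) call in the removed Linux LUtimesNano has a ready-made equivalent in golang.org/x/sys/unix (assumed available here). A Linux-only sketch that touches a symlink rather than its target; the link path is invented:

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	link := filepath.Join(os.TempDir(), "lutimes-demo")
	os.Remove(link)
	if err := os.Symlink("/nonexistent-target", link); err != nil {
		log.Fatal(err)
	}
	// Set atime and mtime to the Unix epoch on the link itself;
	// AT_SYMLINK_NOFOLLOW is what the removed helper hard-coded as 0x100.
	ts := []unix.Timespec{unix.NsecToTimespec(0), unix.NsecToTimespec(0)}
	if err := unix.UtimesNanoAt(unix.AT_FDCWD, link, ts, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		log.Fatal(err)
	}
	log.Println("updated times on", link)
}
```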
-func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 index d2e2c0579..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,63 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It will returns a nil slice and nil error if the xattr is not set. -func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { - return nil, nil - } - if errno == syscall.ERANGE { - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - } - if errno != 0 { - return nil, errno - } - - return dest[:sz], nil -} - -var _zero uintptr - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 0114f2227..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package system - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go deleted file mode 100644 index b42983e98..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go +++ /dev/null @@ -1,21 +0,0 @@ -package tarsum - -// BuilderContext is an interface extending TarSum by adding the Remove method. 
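Before the tarsum files: the xattr helpers deleted just above wrap the lgetxattr/lsetxattr syscalls, which golang.org/x/sys/unix (assumed available) also exposes. A Linux-only round-trip sketch; the path and attribute name are illustrative, and the filesystem must allow user.* xattrs:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/xattr-demo")
	if err != nil {
		log.Fatal(err)
	}
	f.Close()

	const attr = "user.comment"
	if err := unix.Lsetxattr(f.Name(), attr, []byte("hello"), 0); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 128)
	sz, err := unix.Lgetxattr(f.Name(), attr, buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(buf[:sz])) // "hello"
}
```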
-// In general there was concern about adding this method to TarSum itself
-// so instead it is being added just to "BuilderContext" which will then
-// only be used during the .dockerignore file processing
-// - see builder/evaluator.go
-type BuilderContext interface {
-	TarSum
-	Remove(string)
-}
-
-func (bc *tarSum) Remove(filename string) {
-	for i, fis := range bc.sums {
-		if fis.Name() == filename {
-			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
-			// Note, we don't just return because there could be
-			// more than one with this name
-		}
-	}
-}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
deleted file mode 100644
index 5abf5e7ba..000000000
--- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package tarsum
-
-import "sort"
-
-// FileInfoSumInterface provides an interface for accessing file checksum
-// information within a tar file. This info is accessed through interface
-// so the actual name and sum cannot be meddled with.
-type FileInfoSumInterface interface {
-	// File name
-	Name() string
-	// Checksum of this particular file and its headers
-	Sum() string
-	// Position of file in the tar
-	Pos() int64
-}
-
-type fileInfoSum struct {
-	name string
-	sum  string
-	pos  int64
-}
-
-func (fis fileInfoSum) Name() string {
-	return fis.name
-}
-func (fis fileInfoSum) Sum() string {
-	return fis.sum
-}
-func (fis fileInfoSum) Pos() int64 {
-	return fis.pos
-}
-
-// FileInfoSums provides a list of FileInfoSumInterfaces.
-type FileInfoSums []FileInfoSumInterface
-
-// GetFile returns the first FileInfoSumInterface with a matching name.
-func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
-	for i := range fis {
-		if fis[i].Name() == name {
-			return fis[i]
-		}
-	}
-	return nil
-}
-
-// GetAllFile returns a FileInfoSums with all matching names.
-func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
-	f := FileInfoSums{}
-	for i := range fis {
-		if fis[i].Name() == name {
-			f = append(f, fis[i])
-		}
-	}
-	return f
-}
-
-// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
-func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
-	seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
-	for i := range fis {
-		f := fis[i]
-		if _, ok := seen[f.Name()]; ok {
-			dups = append(dups, f)
-		} else {
-			seen[f.Name()] = 0
-		}
-	}
-	return dups
-}
-
-// Len returns the size of the FileInfoSums.
-func (fis FileInfoSums) Len() int { return len(fis) }
-
-// Swap swaps two FileInfoSum values in a FileInfoSums list.
-func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
-
-// SortByPos sorts FileInfoSums content by position.
-func (fis FileInfoSums) SortByPos() {
-	sort.Sort(byPos{fis})
-}
-
-// SortByNames sorts FileInfoSums content by name.
-func (fis FileInfoSums) SortByNames() {
-	sort.Sort(byName{fis})
-}
-
-// SortBySums sorts FileInfoSums content by sums.
-func (fis FileInfoSums) SortBySums() {
-	dups := fis.GetDuplicatePaths()
-	if len(dups) > 0 {
-		sort.Sort(bySum{fis, dups})
-	} else {
-		sort.Sort(bySum{fis, nil})
-	}
-}
-
-// byName is a sort.Sort helper for sorting by file names.
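The duplicate-path scan in GetDuplicatePaths above boils down to a seen-set; a tiny standalone version:

```go
package main

import "fmt"

func duplicates(names []string) []string {
	seen := make(map[string]struct{}, len(names))
	var dups []string
	for _, n := range names {
		if _, ok := seen[n]; ok {
			dups = append(dups, n) // second and later occurrences only
		} else {
			seen[n] = struct{}{}
		}
	}
	return dups
}

func main() {
	fmt.Println(duplicates([]string{"etc/passwd", "bin/sh", "etc/passwd"}))
	// [etc/passwd]
}
```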
-// If names are the same, order them by their appearance in the tar archive -type byName struct{ FileInfoSums } - -func (bn byName) Less(i, j int) bool { - if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { - return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() - } - return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() -} - -// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive -type bySum struct { - FileInfoSums - dups FileInfoSums -} - -func (bs bySum) Less(i, j int) bool { - if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { - return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() - } - return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() -} - -// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order -type byPos struct{ FileInfoSums } - -func (bp byPos) Less(i, j int) bool { - return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go deleted file mode 100644 index 154788db8..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ /dev/null @@ -1,295 +0,0 @@ -// Package tarsum provides algorithms to perform checksum calculation on -// filesystem layers. -// -// The transportation of filesystems, regarding Docker, is done with tar(1) -// archives. There are a variety of tar serialization formats [2], and a key -// concern here is ensuring a repeatable checksum given a set of inputs from a -// generic tar archive. Types of transportation include distribution to and from a -// registry endpoint, saving and loading through commands or Docker daemon APIs, -// transferring the build context from client to Docker daemon, and committing the -// filesystem of a container to become an image. -// -// As tar archives are used for transit, but not preserved in many situations, the -// focus of the algorithm is to ensure the integrity of the preserved filesystem, -// while maintaining a deterministic accountability. This includes neither -// constraining the ordering or manipulation of the files during the creation or -// unpacking of the archive, nor include additional metadata state about the file -// system attributes. -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "hash" - "io" - "path" - "strings" -) - -const ( - buf8K = 8 * 1024 - buf16K = 16 * 1024 - buf32K = 32 * 1024 -) - -// NewTarSum creates a new interface for calculating a fixed time checksum of a -// tar archive. -// -// This is used for calculating checksums of layers of an image, in some cases -// including the byte payload of the image's json metadata as well, and for -// calculating the checksums for buildcache. -func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - return NewTarSumHash(r, dc, v, DefaultTHash) -} - -// NewTarSumHash creates a new TarSum, providing a THash to use rather than -// the DefaultTHash. -func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} - err = ts.initTarSum() - return ts, err -} - -// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. 
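The label handled by NewTarSumForLabel is just "{version}+{hash}"; the same split, standalone:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	parts := strings.SplitN("tarsum.v1+sha256", "+", 2)
	if len(parts) != 2 {
		panic("label should be {tarsum_version}+{hash_name}")
	}
	fmt.Println("version:", parts[0], "hash:", parts[1])
}
```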
-func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { - parts := strings.SplitN(label, "+", 2) - if len(parts) != 2 { - return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") - } - - versionName, hashName := parts[0], parts[1] - - version, ok := tarSumVersionsByName[versionName] - if !ok { - return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) - } - - hashConfig, ok := standardHashConfigs[hashName] - if !ok { - return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) - } - - tHash := NewTHash(hashConfig.name, hashConfig.hash.New) - - return NewTarSumHash(r, disableCompression, version, tHash) -} - -// TarSum is the generic interface for calculating fixed time -// checksums of a tar archive. -type TarSum interface { - io.Reader - GetSums() FileInfoSums - Sum([]byte) string - Version() Version - Hash() THash -} - -// tarSum struct is the structure for a Version0 checksum calculation. -type tarSum struct { - io.Reader - tarR *tar.Reader - tarW *tar.Writer - writer writeCloseFlusher - bufTar *bytes.Buffer - bufWriter *bytes.Buffer - bufData []byte - h hash.Hash - tHash THash - sums FileInfoSums - fileCounter int64 - currentFile string - finished bool - first bool - DisableCompression bool // false by default. When false, the output gzip compressed. - tarSumVersion Version // this field is not exported so it can not be mutated during use - headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive -} - -func (ts tarSum) Hash() THash { - return ts.tHash -} - -func (ts tarSum) Version() Version { - return ts.tarSumVersion -} - -// THash provides a hash.Hash type generator and its name. -type THash interface { - Hash() hash.Hash - Name() string -} - -// NewTHash is a convenience method for creating a THash. -func NewTHash(name string, h func() hash.Hash) THash { - return simpleTHash{n: name, h: h} -} - -type tHashConfig struct { - name string - hash crypto.Hash -} - -var ( - // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. - standardHashConfigs = map[string]tHashConfig{ - "sha256": {name: "sha256", hash: crypto.SHA256}, - "sha512": {name: "sha512", hash: crypto.SHA512}, - } -) - -// DefaultTHash is default TarSum hashing algorithm - "sha256". 
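DefaultTHash is plain SHA-256, and the final checksum is assembled as "{version}+{hashname}:{hex digest}" (see Sum further down). A sketch of that assembly over two made-up per-file sums:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	h := sha256.New()
	h.Write([]byte("sum-of-file-1"))
	h.Write([]byte("sum-of-file-2"))
	fmt.Println("tarsum.v1+sha256:" + hex.EncodeToString(h.Sum(nil)))
}
```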
-var DefaultTHash = NewTHash("sha256", sha256.New) - -type simpleTHash struct { - n string - h func() hash.Hash -} - -func (sth simpleTHash) Name() string { return sth.n } -func (sth simpleTHash) Hash() hash.Hash { return sth.h() } - -func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.headerSelector.selectHeaders(h) { - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *tarSum) initTarSum() error { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufWriter = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.writer = gzip.NewWriter(ts.bufWriter) - } else { - ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} - } - if ts.tHash == nil { - ts.tHash = DefaultTHash - } - ts.h = ts.tHash.Hash() - ts.h.Reset() - ts.first = true - ts.sums = FileInfoSums{} - return nil -} - -func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.finished { - return ts.bufWriter.Read(buf) - } - if len(ts.bufData) < len(buf) { - switch { - case len(buf) <= buf8K: - ts.bufData = make([]byte, buf8K) - case len(buf) <= buf16K: - ts.bufData = make([]byte, buf16K) - case len(buf) <= buf32K: - ts.bufData = make([]byte, buf32K) - default: - ts.bufData = make([]byte, len(buf)) - } - } - buf2 := ts.bufData[:len(buf)] - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) - ts.fileCounter++ - ts.h.Reset() - } else { - ts.first = false - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.tarW.Close(); err != nil { - return 0, err - } - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - if err := ts.writer.Close(); err != nil { - return 0, err - } - ts.finished = true - return n, nil - } - return n, err - } - ts.currentFile = path.Clean(currentHeader.Name) - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) - } - return n, err - } - - // Filling the hash buffer - if _, err = ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writer - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - - // Filling the output writer - if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) -} - -func (ts *tarSum) Sum(extra []byte) string { - ts.sums.SortBySums() - h := ts.tHash.Hash() - if extra != nil { - h.Write(extra) - } - for _, fis := range ts.sums { - h.Write([]byte(fis.Sum())) - } - checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) - return checksum -} - -func (ts *tarSum) GetSums() FileInfoSums { - return ts.sums -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go deleted file mode 100644 index 288228685..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go +++ 
/dev/null @@ -1,150 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "errors" - "sort" - "strconv" - "strings" -) - -// Version is used for versioning of the TarSum algorithm -// based on the prefix of the hash used -// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" -type Version int - -// Prefix of "tarsum" -const ( - Version0 Version = iota - Version1 - // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation - VersionDev -) - -// VersionLabelForChecksum returns the label for the given tarsum -// checksum, i.e., everything before the first `+` character in -// the string or an empty string if no label separator is found. -func VersionLabelForChecksum(checksum string) string { - // Checksums are in the form: {versionLabel}+{hashID}:{hex} - sepIndex := strings.Index(checksum, "+") - if sepIndex < 0 { - return "" - } - return checksum[:sepIndex] -} - -// GetVersions gets a list of all known tarsum versions. -func GetVersions() []Version { - v := []Version{} - for k := range tarSumVersions { - v = append(v, k) - } - return v -} - -var ( - tarSumVersions = map[Version]string{ - Version0: "tarsum", - Version1: "tarsum.v1", - VersionDev: "tarsum.dev", - } - tarSumVersionsByName = map[string]Version{ - "tarsum": Version0, - "tarsum.v1": Version1, - "tarsum.dev": VersionDev, - } -) - -func (tsv Version) String() string { - return tarSumVersions[tsv] -} - -// GetVersionFromTarsum returns the Version from the provided string. -func GetVersionFromTarsum(tarsum string) (Version, error) { - tsv := tarsum - if strings.Contains(tarsum, "+") { - tsv = strings.SplitN(tarsum, "+", 2)[0] - } - for v, s := range tarSumVersions { - if s == tsv { - return v, nil - } - } - return -1, ErrNotVersion -} - -// Errors that may be returned by functions in this package -var ( - ErrNotVersion = errors.New("string does not include a TarSum Version") - ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") -) - -// tarHeaderSelector is the interface which different versions -// of tarsum should use for selecting and ordering tar headers -// for each item in the archive. -type tarHeaderSelector interface { - selectHeaders(h *tar.Header) (orderedHeaders [][2]string) -} - -type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) - -func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { - return f(h) -} - -func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - return [][2]string{ - {"name", h.Name}, - {"mode", strconv.FormatInt(h.Mode, 10)}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.FormatInt(h.Size, 10)}, - {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, - {"devminor", strconv.FormatInt(h.Devminor, 10)}, - } -} - -func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - // Get extended attributes. - xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) - } - sort.Strings(xAttrKeys) - - // Make the slice with enough capacity to hold the 11 basic headers - // we want from the v0 selector plus however many xattrs we have. - orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) - - // Copy all headers from v0 excluding the 'mtime' header (the 5th element). 
- v0headers := v0TarHeaderSelect(h) - orderedHeaders = append(orderedHeaders, v0headers[0:5]...) - orderedHeaders = append(orderedHeaders, v0headers[6:]...) - - // Finally, append the sorted xattrs. - for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) - } - - return -} - -var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ - Version0: v0TarHeaderSelect, - Version1: v1TarHeaderSelect, - VersionDev: v1TarHeaderSelect, -} - -func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { - headerSelector, ok := registeredHeaderSelectors[v] - if !ok { - return nil, ErrVersionNotImplemented - } - - return headerSelector, nil -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go deleted file mode 100644 index 9727ecde3..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go +++ /dev/null @@ -1,22 +0,0 @@ -package tarsum - -import ( - "io" -) - -type writeCloseFlusher interface { - io.WriteCloser - Flush() error -} - -type nopCloseFlusher struct { - io.Writer -} - -func (n *nopCloseFlusher) Close() error { - return nil -} - -func (n *nopCloseFlusher) Flush() error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go deleted file mode 100644 index f5262bccf..000000000 --- a/vendor/github.com/docker/docker/pkg/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, byte(key[0])) - } - } - return codes, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go deleted file mode 100644 index 59dac5ba8..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
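The ctrl-key mapping in the removed ascii.go above is positional: the byte value of each ctrl key is its index in the table. A truncated standalone sketch:

```go
package main

import "fmt"

var ctrlKeys = []string{"ctrl-@", "ctrl-a", "ctrl-b", "ctrl-c"}

func toByte(key string) (byte, bool) {
	for code, name := range ctrlKeys {
		if name == key {
			return byte(code), true
		}
	}
	return 0, false
}

func main() {
	b, ok := toByte("ctrl-c")
	fmt.Println(b, ok) // 3 true (ETX, the byte that triggers SIGINT)
}
```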
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go deleted file mode 100644 index 750d7c3f6..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_other.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows -// +build !linux !cgo -// +build !solaris !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index c9139d0ca..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - /* - VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned - Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It - needs to be explicitly set to 1. 
- */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go deleted file mode 100644 index fe59faa94..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ /dev/null @@ -1,123 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - "syscall" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stedrr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= syscall.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. 
On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go deleted file mode 100644 index 112debbec..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build solaris - -package term - -import ( - "syscall" - "unsafe" -) - -/* -#include <unistd.h> -#include <stropts.h> -#include <termios.h> - -// Small wrapper to get rid of variadic args of ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go deleted file mode 100644 index ddf87a0e5..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !solaris,!windows - -package term - -import ( - "syscall" - "unsafe" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index a91f07e48..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,233 +0,0 @@ -// +build windows - -package term - -import ( - "io" - "os" - "os/signal" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size.
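// Taken together, the Unix implementations above share one save/restore contract. A minimal caller sketch (assumes the package is imported as "term"; error handling elided):
//
//	fd, isTerm := term.GetFdInfo(os.Stdin)
//	if isTerm {
//		state, _ := term.SetRawTerminal(fd)
//		defer term.RestoreTerminal(fd, state)
//		if ws, err := term.GetWinsize(fd); err == nil {
//			fmt.Printf("terminal is %dx%d\n", ws.Width, ws.Height)
//		}
//	}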
-type Winsize struct { - Height uint16 - Width uint16 -} - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// vtInputSupported is true if enableVirtualTerminalInput is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stedrr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var emulateStdin, emulateStdout, emulateStderr bool - fd := os.Stdin.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that enableVirtualTerminalInput is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - winterm.SetConsoleMode(fd, mode) - } - - fd = os.Stdout.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStdout = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - fd = os.Stderr.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStderr = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { - // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. - emulateStdin = true - emulateStdout = false - emulateStderr = false - } - - if emulateStdin { - stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. 
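// StdStreams above detects VT support by setting a candidate mode bit and putting the original mode back immediately. The idiom in isolation (a sketch; candidateFlag stands in for enableVirtualTerminalInput and friends):
//
//	if mode, err := winterm.GetConsoleMode(fd); err == nil {
//		supported := winterm.SetConsoleMode(fd, mode|candidateFlag) == nil
//		// Restore unconditionally: invalid bits can stick on input handles.
//		winterm.SetConsoleMode(fd, mode)
//		_ = supported
//	}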
-func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since disableNewlineAutoReturn might not be supported on this - // version of Windows. - winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. 
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= enableVirtualTerminalInput - } - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900a..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go deleted file mode 100644 index 22921b6ae..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
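// Each MakeRaw variant leans on Go's AND NOT operator: x &^= y clears exactly the bits of y in x, so one statement drops several termios flags at once. A self-contained illustration (arbitrary values, not real termios constants):
//
//	const flagA, flagB, flagC = 1 << 0, 1 << 1, 1 << 2
//	mode := uint32(flagA | flagB | flagC)
//	mode &^= flagA | flagC // clear A and C, keep B
//	// mode == flagB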
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index cb0b88356..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. 
-type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - initLogger() - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. -func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
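// Since unsafe.Sizeof accepts any expression, a composite literal would yield the same record size without the pointer cast (an equivalent sketch):
//
//	recordSize := int(unsafe.Sizeof(winterm.INPUT_RECORD{}))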
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell. - // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key. - // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s. - // <Ctrl>-S Suspends printing on the screen (does not stop the program). - // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key. - // <Ctrl>-E Quits current command and creates a core - - } - - // <Alt>+Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
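// Worked example of the tables above (assuming go-ansiterm defines KEY_ESC_CSI as "\x1B[" and KEY_CONTROL_PARAM_2 as ";2"): a KEY_EVENT for VK_UP with only SHIFT_PRESSED set selects the "%s%sA" format from arrowKeyMapPrefix, so formatVirtualKey renders the sequence for Shift+Up:
//
//	seq := fmt.Sprintf("%s%sA", "\x1B[", ";2")
//	// seq == "\x1B[;2A"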
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index a3ce5697d..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - initLogger() - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index ca5c3b2e5..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package windows - -import ( - "os" - - "github.com/Azure/go-ansiterm/winterm" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index ce4cb5990..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windows - -import ( - "io/ioutil" - "os" - "sync" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go deleted file mode 100644 index 44152873b..000000000 --- a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package urlutil provides helper function to check urls kind. -// It supports http urls, git urls and transport url (tcp://, …) -package urlutil - -import ( - "regexp" - "strings" -) - -var ( - validPrefixes = map[string][]string{ - "url": {"http://", "https://"}, - "git": {"git://", "github.com/", "git@"}, - "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, - } - urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") -) - -// IsURL returns true if the provided str is an HTTP(S) URL. -func IsURL(str string) bool { - return checkURL(str, "url") -} - -// IsGitURL returns true if the provided str is a git repository URL. 
-func IsGitURL(str string) bool { - if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { - return true - } - return checkURL(str, "git") -} - -// IsGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git. -func IsGitTransport(str string) bool { - return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} - -// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. -func IsTransportURL(str string) bool { - return checkURL(str, "transport") -} - -func checkURL(str, kind string) bool { - for _, prefix := range validPrefixes[kind] { - if strings.HasPrefix(str, prefix) { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go deleted file mode 100644 index 93b489a14..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/plugin.go +++ /dev/null @@ -1,244 +0,0 @@ -package v2 - -import ( - "fmt" - "strings" - "sync" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" -) - -// Plugin represents an individual plugin. -type Plugin struct { - mu sync.RWMutex - PluginObj types.Plugin `json:"plugin"` // todo: embed struct - pClient *plugins.Client - refCount int - PropagatedMount string // TODO: make private - Rootfs string // TODO: make private - - Config digest.Digest - Blobsums []digest.Digest -} - -const defaultPluginRuntimeDestination = "/run/docker/plugins" - -// ErrInadequateCapability indicates that the plugin did not have the requested capability. -type ErrInadequateCapability struct { - cap string -} - -func (e ErrInadequateCapability) Error() string { - return fmt.Sprintf("plugin does not provide %q capability", e.cap) -} - -// BasePath returns the path to which all paths returned by the plugin are relative to. -// For Plugin objects this returns the host path of the plugin container's rootfs. -func (p *Plugin) BasePath() string { - return p.Rootfs -} - -// Client returns the plugin client. -func (p *Plugin) Client() *plugins.Client { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.pClient -} - -// SetPClient set the plugin client. -func (p *Plugin) SetPClient(client *plugins.Client) { - p.mu.Lock() - defer p.mu.Unlock() - - p.pClient = client -} - -// IsV1 returns true for V1 plugins and false otherwise. -func (p *Plugin) IsV1() bool { - return false -} - -// Name returns the plugin name. -func (p *Plugin) Name() string { - return p.PluginObj.Name -} - -// FilterByCap query the plugin for a given capability. -func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { - capability = strings.ToLower(capability) - for _, typ := range p.PluginObj.Config.Interface.Types { - if typ.Capability == capability && typ.Prefix == "docker" { - return p, nil - } - } - return nil, ErrInadequateCapability{capability} -} - -// InitEmptySettings initializes empty settings for a plugin. 
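// Sample classifications for the urlutil checks above (illustrative inputs):
//
//	urlutil.IsURL("https://example.com/app.tar.gz")   // true
//	urlutil.IsGitURL("https://example.com/repo.git")  // true: HTTP URL with a ".git" suffix
//	urlutil.IsGitURL("git@example.com:user/repo.git") // true: "git@" prefix
//	urlutil.IsTransportURL("unix:///var/run/x.sock")  // true: "unix://" prefix
//	urlutil.IsURL("ftp://example.com/file")           // false: "ftp://" is not a known prefix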
-func (p *Plugin) InitEmptySettings() { - p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) - copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) - p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) - copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) - p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) - for _, env := range p.PluginObj.Config.Env { - if env.Value != nil { - p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) - } - } - p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) - copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) -} - -// Set is used to pass arguments to the plugin. -func (p *Plugin) Set(args []string) error { - p.mu.Lock() - defer p.mu.Unlock() - - if p.PluginObj.Enabled { - return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") - } - - sets, err := newSettables(args) - if err != nil { - return err - } - - // TODO(vieux): lots of code duplication here, needs to be refactored. - -next: - for _, s := range sets { - // range over all the envs in the config - for _, env := range p.PluginObj.Config.Env { - // found the env in the config - if env.Name == s.name { - // is it settable ? - if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - // is it, so lets update the settings in memory - updateSettingsEnv(&p.PluginObj.Settings.Env, &s) - continue next - } - } - - // range over all the mounts in the config - for _, mount := range p.PluginObj.Config.Mounts { - // found the mount in the config - if mount.Name == s.name { - // is it settable ? - if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - - // it is, so lets update the settings in memory - *mount.Source = s.value - continue next - } - } - - // range over all the devices in the config - for _, device := range p.PluginObj.Config.Linux.Devices { - // found the device in the config - if device.Name == s.name { - // is it settable ? - if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - - // it is, so lets update the settings in memory - *device.Path = s.value - continue next - } - } - - // found the name in the config - if p.PluginObj.Config.Args.Name == s.name { - // is it settable ? - if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - - // it is, so lets update the settings in memory - p.PluginObj.Settings.Args = strings.Split(s.value, " ") - continue next - } - - return fmt.Errorf("setting %q not found in the plugin configuration", s.name) - } - - return nil -} - -// IsEnabled returns the active state of the plugin. -func (p *Plugin) IsEnabled() bool { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.Enabled -} - -// GetID returns the plugin's ID. -func (p *Plugin) GetID() string { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.ID -} - -// GetSocket returns the plugin socket. 
-func (p *Plugin) GetSocket() string { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.Config.Interface.Socket -} - -// GetTypes returns the interface types of a plugin. -func (p *Plugin) GetTypes() []types.PluginInterfaceType { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.Config.Interface.Types -} - -// GetRefCount returns the reference count. -func (p *Plugin) GetRefCount() int { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.refCount -} - -// AddRefCount adds to reference count. -func (p *Plugin) AddRefCount(count int) { - p.mu.Lock() - defer p.mu.Unlock() - - p.refCount += count -} - -// Acquire increments the plugin's reference count -// This should be followed up by `Release()` when the plugin is no longer in use. -func (p *Plugin) Acquire() { - p.AddRefCount(plugingetter.ACQUIRE) -} - -// Release decrements the plugin's reference count -// This should only be called when the plugin is no longer in use, e.g. with -// via `Acquire()` or getter.Get("name", "type", plugingetter.ACQUIRE) -func (p *Plugin) Release() { - p.AddRefCount(plugingetter.RELEASE) -} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go deleted file mode 100644 index f1c2da0bc..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build linux - -package v2 - -import ( - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/system" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// InitSpec creates an OCI spec from the plugin's config. -func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { - s := oci.DefaultSpec() - s.Root = specs.Root{ - Path: p.Rootfs, - Readonly: false, // TODO: all plugins should be readonly? settable in config? 
- } - - userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) - for _, m := range p.PluginObj.Settings.Mounts { - userMounts[m.Destination] = struct{}{} - } - - execRoot = filepath.Join(execRoot, p.PluginObj.ID) - if err := os.MkdirAll(execRoot, 0700); err != nil { - return nil, err - } - - mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ - Source: &execRoot, - Destination: defaultPluginRuntimeDestination, - Type: "bind", - Options: []string{"rbind", "rshared"}, - }) - - if p.PluginObj.Config.Network.Type != "" { - // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) - if p.PluginObj.Config.Network.Type == "host" { - oci.RemoveNamespace(&s, specs.NamespaceType("network")) - } - etcHosts := "/etc/hosts" - resolvConf := "/etc/resolv.conf" - mounts = append(mounts, - types.PluginMount{ - Source: &etcHosts, - Destination: etcHosts, - Type: "bind", - Options: []string{"rbind", "ro"}, - }, - types.PluginMount{ - Source: &resolvConf, - Destination: resolvConf, - Type: "bind", - Options: []string{"rbind", "ro"}, - }) - } - - for _, mnt := range mounts { - m := specs.Mount{ - Destination: mnt.Destination, - Type: mnt.Type, - Options: mnt.Options, - } - if mnt.Source == nil { - return nil, errors.New("mount source is not specified") - } - m.Source = *mnt.Source - s.Mounts = append(s.Mounts, m) - } - - for i, m := range s.Mounts { - if strings.HasPrefix(m.Destination, "/dev/") { - if _, ok := userMounts[m.Destination]; ok { - s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) - } - } - } - - if p.PluginObj.Config.PropagatedMount != "" { - p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) - s.Linux.RootfsPropagation = "rshared" - } - - if p.PluginObj.Config.Linux.AllowAllDevices { - rwm := "rwm" - s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} - } - for _, dev := range p.PluginObj.Settings.Devices { - path := *dev.Path - d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") - if err != nil { - return nil, err - } - s.Linux.Devices = append(s.Linux.Devices, d...) - s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) - } - - envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) - envs[0] = "PATH=" + system.DefaultPathEnv - envs = append(envs, p.PluginObj.Settings.Env...) - - args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) - cwd := p.PluginObj.Config.WorkDir - if len(cwd) == 0 { - cwd = "/" - } - s.Process.Terminal = false - s.Process.Args = args - s.Process.Cwd = cwd - s.Process.Env = envs - - s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) - - return &s, nil -} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go deleted file mode 100644 index e60fb8311..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !linux - -package v2 - -import ( - "errors" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// InitSpec creates an OCI spec from the plugin's config. 
-func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { - return nil, errors.New("not supported") -} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go deleted file mode 100644 index 79c6befc2..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/settable.go +++ /dev/null @@ -1,102 +0,0 @@ -package v2 - -import ( - "errors" - "fmt" - "strings" -) - -type settable struct { - name string - field string - value string -} - -var ( - allowedSettableFieldsEnv = []string{"value"} - allowedSettableFieldsArgs = []string{"value"} - allowedSettableFieldsDevices = []string{"path"} - allowedSettableFieldsMounts = []string{"source"} - - errMultipleFields = errors.New("multiple fields are settable, one must be specified") - errInvalidFormat = errors.New("invalid format, must be <name>[.<field>][=<value>]") -) - -func newSettables(args []string) ([]settable, error) { - sets := make([]settable, 0, len(args)) - for _, arg := range args { - set, err := newSettable(arg) - if err != nil { - return nil, err - } - sets = append(sets, set) - } - return sets, nil -} - -func newSettable(arg string) (settable, error) { - var set settable - if i := strings.Index(arg, "="); i == 0 { - return set, errInvalidFormat - } else if i < 0 { - set.name = arg - } else { - set.name = arg[:i] - set.value = arg[i+1:] - } - - if i := strings.LastIndex(set.name, "."); i > 0 { - set.field = set.name[i+1:] - set.name = arg[:i] - } - - return set, nil -} - -// prettyName return name.field if there is a field, otherwise name. -func (set *settable) prettyName() string { - if set.field != "" { - return fmt.Sprintf("%s.%s", set.name, set.field) - } - return set.name -} - -func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { - if set.field == "" { - if len(settable) == 1 { - // if field is not specified and there only one settable, default to it.
- set.field = settable[0] - } else if len(settable) > 1 { - return false, errMultipleFields - } - } - - isAllowed := false - for _, allowedSettableField := range allowedSettableFields { - if set.field == allowedSettableField { - isAllowed = true - break - } - } - - if isAllowed { - for _, settableField := range settable { - if set.field == settableField { - return true, nil - } - } - } - - return false, nil -} - -func updateSettingsEnv(env *[]string, set *settable) { - for i, e := range *env { - if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { - (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) - return - } - } - - *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) -} diff --git a/vendor/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go deleted file mode 100644 index 996fc5070..000000000 --- a/vendor/github.com/docker/docker/reference/reference.go +++ /dev/null @@ -1,216 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digest" - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/image/v1" -) - -const ( - // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified - DefaultTag = "latest" - // DefaultHostname is the default built-in hostname - DefaultHostname = "docker.io" - // LegacyDefaultHostname is automatically converted to DefaultHostname - LegacyDefaultHostname = "index.docker.io" - // DefaultRepoPrefix is the prefix used for default repositories in default host - DefaultRepoPrefix = "library/" -) - -// Named is an object with a full name -type Named interface { - // Name returns normalized repository name, like "ubuntu". - Name() string - // String returns full reference, like "ubuntu@sha256:abcdef..." - String() string - // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" - FullName() string - // Hostname returns hostname for the reference, like "docker.io" - Hostname() string - // RemoteName returns the repository component of the full name, like "library/ubuntu" - RemoteName() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Canonical reference is an object with a fully unique -// name including a name with hostname and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. -// If an error was encountered it is returned, along with a nil Reference. -func ParseNamed(s string) (Named, error) { - named, err := distreference.ParseNamed(s) - if err != nil { - return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag: %s", s, err) - } - r, err := WithName(named.Name()) - if err != nil { - return nil, err - } - if canonical, isCanonical := named.(distreference.Canonical); isCanonical { - return WithDigest(r, canonical.Digest()) - } - if tagged, isTagged := named.(distreference.NamedTagged); isTagged { - return WithTag(r, tagged.Tag()) - } - return r, nil -} - -// TrimNamed removes any tag or digest from the named reference -func TrimNamed(ref Named) Named { - return &namedRef{distreference.TrimNamed(ref)} -} - -// WithName returns a named object representing the given string. 
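// Tracing newSettable above on a typical plugin "set" argument (hypothetical input): the first "=" splits name from value, then the last "." inside the name splits off the field:
//
//	s, _ := newSettable("mount.source=/tmp/cache")
//	// s == settable{name: "mount", field: "source", value: "/tmp/cache"}
//	// s.prettyName() == "mount.source"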
If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - name, err := normalize(name) - if err != nil { - return nil, err - } - if err := validateName(name); err != nil { - return nil, err - } - r, err := distreference.WithName(name) - if err != nil { - return nil, err - } - return &namedRef{r}, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - r, err := distreference.WithTag(name, tag) - if err != nil { - return nil, err - } - return &taggedRef{namedRef{r}}, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - r, err := distreference.WithDigest(name, digest) - if err != nil { - return nil, err - } - return &canonicalRef{namedRef{r}}, nil -} - -type namedRef struct { - distreference.Named -} -type taggedRef struct { - namedRef -} -type canonicalRef struct { - namedRef -} - -func (r *namedRef) FullName() string { - hostname, remoteName := splitHostname(r.Name()) - return hostname + "/" + remoteName -} -func (r *namedRef) Hostname() string { - hostname, _ := splitHostname(r.Name()) - return hostname -} -func (r *namedRef) RemoteName() string { - _, remoteName := splitHostname(r.Name()) - return remoteName -} -func (r *taggedRef) Tag() string { - return r.namedRef.Named.(distreference.NamedTagged).Tag() -} -func (r *canonicalRef) Digest() digest.Digest { - return r.namedRef.Named.(distreference.Canonical).Digest() -} - -// WithDefaultTag adds a default tag to a reference if it only has a repo name. -func WithDefaultTag(ref Named) Named { - if IsNameOnly(ref) { - ref, _ = WithTag(ref, DefaultTag) - } - return ref -} - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// ParseIDOrReference parses string for an image ID or a reference. ID can be -// without a default prefix. -func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { - if err := v1.ValidateID(idOrRef); err == nil { - idOrRef = "sha256:" + idOrRef - } - if dgst, err := digest.ParseDigest(idOrRef); err == nil { - return dgst, nil, nil - } - ref, err := ParseNamed(idOrRef) - return "", ref, err -} - -// splitHostname splits a repository name to hostname and remotename string. -// If no valid hostname is found, the default hostname is used. Repository name -// needs to be already validated before. -func splitHostname(name string) (hostname, remoteName string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - hostname, remoteName = DefaultHostname, name - } else { - hostname, remoteName = name[:i], name[i+1:] - } - if hostname == LegacyDefaultHostname { - hostname = DefaultHostname - } - if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { - remoteName = DefaultRepoPrefix + remoteName - } - return -} - -// normalize returns a repository name in its normalized form, meaning it -// will not contain default hostname nor library/ prefix for official images. 
-func normalize(name string) (string, error) { - host, remoteName := splitHostname(name) - if strings.ToLower(remoteName) != remoteName { - return "", errors.New("invalid reference format: repository name must be lowercase") - } - if host == DefaultHostname { - if strings.HasPrefix(remoteName, DefaultRepoPrefix) { - return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil - } - return remoteName, nil - } - return name, nil -} - -func validateName(name string) error { - if err := v1.ValidateID(name); err == nil { - return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) - } - return nil -} diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go deleted file mode 100644 index 71ca236c9..000000000 --- a/vendor/github.com/docker/docker/reference/store.go +++ /dev/null @@ -1,286 +0,0 @@ -package reference - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "sort" - "sync" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/ioutils" -) - -var ( - // ErrDoesNotExist is returned if a reference is not found in the - // store. - ErrDoesNotExist = errors.New("reference does not exist") -) - -// An Association is a tuple associating a reference with an image ID. -type Association struct { - Ref Named - ID digest.Digest -} - -// Store provides the set of methods which can operate on a tag store. -type Store interface { - References(id digest.Digest) []Named - ReferencesByName(ref Named) []Association - AddTag(ref Named, id digest.Digest, force bool) error - AddDigest(ref Canonical, id digest.Digest, force bool) error - Delete(ref Named) (bool, error) - Get(ref Named) (digest.Digest, error) -} - -type store struct { - mu sync.RWMutex - // jsonPath is the path to the file where the serialized tag data is - // stored. - jsonPath string - // Repositories is a map of repositories, indexed by name. - Repositories map[string]repository - // referencesByIDCache is a cache of references indexed by ID, to speed - // up References. - referencesByIDCache map[digest.Digest]map[string]Named -} - -// Repository maps tags to digests. The key is a stringified Reference, -// including the repository name. -type repository map[string]digest.Digest - -type lexicalRefs []Named - -func (a lexicalRefs) Len() int { return len(a) } -func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } - -type lexicalAssociations []Association - -func (a lexicalAssociations) Len() int { return len(a) } -func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } - -// NewReferenceStore creates a new reference store, tied to a file path where -// the set of references are serialized in JSON format. -func NewReferenceStore(jsonPath string) (Store, error) { - abspath, err := filepath.Abs(jsonPath) - if err != nil { - return nil, err - } - - store := &store{ - jsonPath: abspath, - Repositories: make(map[string]repository), - referencesByIDCache: make(map[digest.Digest]map[string]Named), - } - // Load the json file if it exists, otherwise create it. - if err := store.reload(); os.IsNotExist(err) { - if err := store.save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -// AddTag adds a tag reference to the store. 
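// With the helpers above, a bare repository name round-trips through the canonical docker.io form (a sketch; expected values shown as comments):
//
//	ref, _ := reference.ParseNamed("ubuntu:16.04")
//	ref.Name()       // "ubuntu": normalized, no hostname or "library/" prefix
//	ref.FullName()   // "docker.io/library/ubuntu"
//	ref.Hostname()   // "docker.io"
//	ref.RemoteName() // "library/ubuntu"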
If force is set to true, existing -// references can be overwritten. This only works for tags, not digests. -func (store *store) AddTag(ref Named, id digest.Digest, force bool) error { - if _, isCanonical := ref.(Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - return store.addReference(WithDefaultTag(ref), id, force) -} - -// AddDigest adds a digest reference to the store. -func (store *store) AddDigest(ref Canonical, id digest.Digest, force bool) error { - return store.addReference(ref, id, force) -} - -func (store *store) addReference(ref Named, id digest.Digest, force bool) error { - if ref.Name() == string(digest.Canonical) { - return errors.New("refusing to create an ambiguous tag using digest algorithm as name") - } - - store.mu.Lock() - defer store.mu.Unlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists || repository == nil { - repository = make(map[string]digest.Digest) - store.Repositories[ref.Name()] = repository - } - - refStr := ref.String() - oldID, exists := repository[refStr] - - if exists { - // force only works for tags - if digested, isDigest := ref.(Canonical); isDigest { - return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) - } - - if !force { - return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) - } - - if store.referencesByIDCache[oldID] != nil { - delete(store.referencesByIDCache[oldID], refStr) - if len(store.referencesByIDCache[oldID]) == 0 { - delete(store.referencesByIDCache, oldID) - } - } - } - - repository[refStr] = id - if store.referencesByIDCache[id] == nil { - store.referencesByIDCache[id] = make(map[string]Named) - } - store.referencesByIDCache[id][refStr] = ref - - return store.save() -} - -// Delete deletes a reference from the store. It returns true if a deletion -// happened, or false otherwise. -func (store *store) Delete(ref Named) (bool, error) { - ref = WithDefaultTag(ref) - - store.mu.Lock() - defer store.mu.Unlock() - - repoName := ref.Name() - - repository, exists := store.Repositories[repoName] - if !exists { - return false, ErrDoesNotExist - } - - refStr := ref.String() - if id, exists := repository[refStr]; exists { - delete(repository, refStr) - if len(repository) == 0 { - delete(store.Repositories, repoName) - } - if store.referencesByIDCache[id] != nil { - delete(store.referencesByIDCache[id], refStr) - if len(store.referencesByIDCache[id]) == 0 { - delete(store.referencesByIDCache, id) - } - } - return true, store.save() - } - - return false, ErrDoesNotExist -} - -// Get retrieves an item from the store by reference -func (store *store) Get(ref Named) (digest.Digest, error) { - ref = WithDefaultTag(ref) - - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists || repository == nil { - return "", ErrDoesNotExist - } - - id, exists := repository[ref.String()] - if !exists { - return "", ErrDoesNotExist - } - - return id, nil -} - -// References returns a slice of references to the given ID. The slice -// will be nil if there are no references to this ID. -func (store *store) References(id digest.Digest) []Named { - store.mu.RLock() - defer store.mu.RUnlock() - - // Convert the internal map to an array for two reasons: - // 1) We must not return a mutable - // 2) It would be ugly to expose the extraneous map keys to callers. 
- - var references []Named - for _, ref := range store.referencesByIDCache[id] { - references = append(references, ref) - } - - sort.Sort(lexicalRefs(references)) - - return references -} - -// ReferencesByName returns the references for a given repository name. -// If there are no references known for this repository name, -// ReferencesByName returns nil. -func (store *store) ReferencesByName(ref Named) []Association { - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists { - return nil - } - - var associations []Association - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - return nil - } - associations = append(associations, - Association{ - Ref: ref, - ID: refID, - }) - } - - sort.Sort(lexicalAssociations(associations)) - - return associations -} - -func (store *store) save() error { - // Store the json - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) -} - -func (store *store) reload() error { - f, err := os.Open(store.jsonPath) - if err != nil { - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&store); err != nil { - return err - } - - for _, repository := range store.Repositories { - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - continue - } - if store.referencesByIDCache[refID] == nil { - store.referencesByIDCache[refID] = make(map[string]Named) - } - store.referencesByIDCache[refID][refStr] = ref - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go deleted file mode 100644 index 8cadd51ba..000000000 --- a/vendor/github.com/docker/docker/registry/auth.go +++ /dev/null @@ -1,303 +0,0 @@ -package registry - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" -) - -const ( - // AuthClientID is used the ClientID used for the token server - AuthClientID = "docker" -) - -// loginV1 tries to register/login to the v1 registry server. 
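
The store's save/reload pair persists the whole struct as JSON with an atomic write, so a crash never leaves a truncated file behind. A stdlib-only sketch of the write-to-temp-then-rename shape (the vendored ioutils.AtomicWriteFile is more careful, but the pattern is the same):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// atomicWriteJSON marshals v, writes it to a temporary file in the target's
// directory, then renames it over the target so readers never observe a
// half-written file.
func atomicWriteJSON(path string, v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	tmp, err := ioutil.TempFile(filepath.Dir(path), ".tmp-repositories")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort cleanup; a no-op after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	repos := map[string]map[string]string{
		"example/app": {"example/app:v1": "sha256:e3b0..."},
	}
	fmt.Println(atomicWriteJSON("/tmp/repositories.json", repos))
}
```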
-func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { - registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) - if err != nil { - return "", "", err - } - - serverAddress := registryEndpoint.String() - - logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) - - if serverAddress == "" { - return "", "", fmt.Errorf("Server Error: Server Address not set.") - } - - loginAgainstOfficialIndex := serverAddress == IndexServer - - req, err := http.NewRequest("GET", serverAddress+"users/", nil) - if err != nil { - return "", "", err - } - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := registryEndpoint.client.Do(req) - if err != nil { - // fallback when request could not be completed - return "", "", fallbackError{ - err: err, - } - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", "", err - } - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", "", nil - } else if resp.StatusCode == http.StatusUnauthorized { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") - } - return "", "", fmt.Errorf("Wrong login/password, please try again") - } else if resp.StatusCode == http.StatusForbidden { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") - } - // *TODO: Use registry configuration to determine what this says, if anything? - return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 - logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) - return "", "", fmt.Errorf("Internal Server Error") - } - return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header) -} - -type loginCredentialStore struct { - authConfig *types.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -type staticCredentialStore struct { - auth *types.AuthConfig -} - -// NewStaticCredentialStore returns a credential store -// which always returns the same credential values. -func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { - return staticCredentialStore{ - auth: auth, - } -} - -func (scs staticCredentialStore) Basic(*url.URL) (string, string) { - if scs.auth == nil { - return "", "" - } - return scs.auth.Username, scs.auth.Password -} - -func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { - if scs.auth == nil { - return "" - } - return scs.auth.IdentityToken -} - -func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -type fallbackError struct { - err error -} - -func (err fallbackError) Error() string { - return err.err.Error() -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. 
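
Both credential stores above satisfy the same small CredentialStore interface from docker/distribution. A sketch of a third, hypothetical implementation, assuming the vendored `auth` package is importable:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
)

// envCredentialStore is a hypothetical CredentialStore that serves one fixed
// username/password pair and keeps refresh tokens per token-server host and
// service name.
type envCredentialStore struct {
	username, password string
	refreshTokens      map[string]string
}

func (e *envCredentialStore) Basic(*url.URL) (string, string) {
	return e.username, e.password
}

func (e *envCredentialStore) RefreshToken(u *url.URL, service string) string {
	return e.refreshTokens[u.Host+"/"+service]
}

func (e *envCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
	e.refreshTokens[u.Host+"/"+service] = token
}

func main() {
	var cs auth.CredentialStore = &envCredentialStore{
		username:      "alice",
		password:      "s3cret",
		refreshTokens: map[string]string{},
	}
	u, _ := url.Parse("https://auth.docker.io")
	cs.SetRefreshToken(u, "registry.docker.io", "tok")
	user, _ := cs.Basic(u)
	fmt.Println(user, cs.RefreshToken(u, "registry.docker.io"))
}
```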
These challenges -// will be used to authenticate against the registry to validate credentials. -func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { - logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") - - modifiers := DockerHeaders(userAgent, nil) - authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) - - credentialAuthConfig := *authConfig - creds := loginCredentialStore{ - authConfig: &credentialAuthConfig, - } - - loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { - challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return nil, foundV2, err - } - - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - tr := transport.NewTransport(authTransport, modifiers...) - - return &http.Client{ - Transport: tr, - Timeout: 15 * time.Second, - }, foundV2, nil - -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. 
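
At its core, the v2 login is an authenticated GET against the registry's /v2/ root. A stdlib-only sketch of that probe, with a hypothetical endpoint and credentials, and without the challenge/token negotiation the real client layers on top:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// probeV2Login issues a GET against the /v2/ root with basic auth and
// reports whether the credentials were accepted. A real client would parse
// the WWW-Authenticate challenge and fetch a bearer token instead.
func probeV2Login(endpoint, user, pass string) error {
	client := &http.Client{Timeout: 15 * time.Second}
	url := strings.TrimRight(endpoint, "/") + "/v2/"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	req.SetBasicAuth(user, pass)
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("login attempt to %s failed: %d %s (challenge: %q)",
			url, resp.StatusCode, http.StatusText(resp.StatusCode),
			resp.Header.Get("WWW-Authenticate"))
	}
	return nil
}

func main() {
	// Hypothetical registry and credentials.
	if err := probeV2Login("https://registry.example.com", "alice", "s3cret"); err != nil {
		fmt.Println(err)
	}
}
```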
-func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registry, ac := range authConfigs { - if configKey == ConvertToHostname(registry) { - return ac - } - } - - // When all else fails, return an empty auth config - return types.AuthConfig{} -} - -// PingResponseError is used when the response from a ping -// was received but invalid. -type PingResponseError struct { - Err error -} - -func (err PingResponseError) Error() string { - return err.Err.Error() -} - -// PingV2Registry attempts to ping a v2 registry and on success return a -// challenge manager for the supported authentication types and -// whether v2 was confirmed by the response. If a response is received but -// cannot be interpreted a PingResponseError will be returned. -func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { - var ( - foundV2 = false - v2Version = auth.APIVersion{ - Type: "registry", - Version: "2.0", - } - ) - - pingClient := &http.Client{ - Transport: transport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, false, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, false, err - } - defer resp.Body.Close() - - versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) - for _, pingVersion := range versions { - if pingVersion == v2Version { - // The version header indicates we're definitely - // talking to a v2 registry. So don't allow future - // fallbacks to the v1 protocol. - - foundV2 = true - break - } - } - - challengeManager := challenge.NewSimpleManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, foundV2, PingResponseError{ - Err: err, - } - } - - return challengeManager, foundV2, nil -} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go deleted file mode 100644 index 9a4f6a925..000000000 --- a/vendor/github.com/docker/docker/registry/config.go +++ /dev/null @@ -1,305 +0,0 @@ -package registry - -import ( - "errors" - "fmt" - "net" - "net/url" - "strings" - - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/opts" - "github.com/docker/docker/reference" - "github.com/spf13/pflag" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` - - // V2Only controls access to legacy registries. 
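
The ConvertToHostname/ResolveAuthConfig pair above is what lets credentials keyed by a full URL in a legacy config file still match a plain hostname. A small sketch of that matching, with hypothetical config data:

```go
package main

import (
	"fmt"
	"strings"
)

// convertToHostname reproduces the helper above: strip an http:// or
// https:// prefix and anything after the first path separator.
func convertToHostname(url string) string {
	stripped := strings.TrimPrefix(strings.TrimPrefix(url, "https://"), "http://")
	return strings.SplitN(stripped, "/", 2)[0]
}

func main() {
	// Legacy config files may key credentials by full URL; matching on the
	// converted hostname lets both forms resolve to the same entry.
	authConfigs := map[string]string{"https://registry.example.com/v1/": "alice"}
	want := "registry.example.com"
	for key, user := range authConfigs {
		if convertToHostname(key) == want {
			fmt.Println("matched legacy key:", key, "->", user)
		}
	}
}
```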
If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only bool `json:"disable-legacy-registry,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. -type serviceConfig struct { - registrytypes.ServiceConfig - V2Only bool -} - -var ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryVersionHeader is the name of the default HTTP header - // that carries Registry version info - DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - - // IndexHostname is the index hostname - IndexHostname = "index.docker.io" - // IndexServer is used for user auth and image search - IndexServer = "https://" + IndexHostname + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" - - // NotaryServer is the endpoint serving the Notary trust server - NotaryServer = "https://notary.docker.io" - - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: "registry-1.docker.io", - } -) - -var ( - // ErrInvalidRepositoryName is an error returned if the repository name did - // not have the correct form - ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - - emptyServiceConfig = newServiceConfig(ServiceOptions{}) -) - -// for mocking in unit tests -var lookupIP = net.LookupIP - -// InstallCliFlags adds command-line options to the top-level flag parser for -// the current process. -func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { - mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) - insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) - - flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") - flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") - - options.installCliPlatformFlags(flags) -} - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) *serviceConfig { - config := &serviceConfig{ - ServiceConfig: registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - Mirrors: options.Mirrors, - }, - V2Only: options.V2Only, - } - - config.LoadInsecureRegistries(options.InsecureRegistries) - - return config -} - -// LoadInsecureRegistries loads insecure registries to config -func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { - // Localhost is by default considered as an insecure registry - // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). - // - // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change - // daemon flags on boot2docker? - registries = append(registries, "127.0.0.0/8") - - // Store original InsecureRegistryCIDRs and IndexConfigs - // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. 
- originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs - originalIndexInfos := config.ServiceConfig.IndexConfigs - - config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) - config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) - -skip: - for _, r := range registries { - // validate insecure registry - if _, err := ValidateIndexName(r); err != nil { - // before returning err, roll back to original data - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return err - } - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. - data := (*registrytypes.NetIPNet)(ipnet) - for _, value := range config.InsecureRegistryCIDRs { - if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { - continue skip - } - } - // ipnet is not found, add it in config.InsecureRegistryCIDRs - config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) - - } else { - // Assume `host:port` if not CIDR. - config.IndexConfigs[r] = ®istrytypes.IndexInfo{ - Name: r, - Mirrors: make([]string, 0), - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return nil -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func isSecureIndex(config *serviceConfig, indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. - if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - host, _, err := net.SplitHostPort(indexName) - if err != nil { - // assume indexName is of the form `host` without the port and go on. - host = indexName - } - - addrs, err := lookupIP(host) - if err != nil { - ip := net.ParseIP(host) - if ip != nil { - addrs = []net.IP{ip} - } - - // if ip == nil, then `host` is neither an IP nor it could be looked up, - // either because the index is unreachable, or because the index is behind an HTTP proxy. - // So, len(addrs) == 0 and we're not aborting. - } - - // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
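
The CIDR handling here is what makes a flag like `--insecure-registry 10.0.0.0/8` cover every registry in a subnet. A stdlib sketch of the resolve-then-contain test, with hypothetical inputs (note the daemon always appends 127.0.0.0/8 to the list, per the code above):

```go
package main

import (
	"fmt"
	"net"
)

// isInsecureByCIDR resolves the host part of an index name and reports
// whether any of its addresses fall inside a configured insecure CIDR.
// (The removed isSecureIndex returns the inverse.)
func isInsecureByCIDR(indexName string, cidrs []string) bool {
	host, _, err := net.SplitHostPort(indexName)
	if err != nil {
		host = indexName // no port present
	}
	addrs, _ := net.LookupIP(host)
	if ip := net.ParseIP(host); ip != nil {
		addrs = []net.IP{ip}
	}
	for _, c := range cidrs {
		_, ipnet, err := net.ParseCIDR(c)
		if err != nil {
			continue
		}
		for _, addr := range addrs {
			if ipnet.Contains(addr) {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(isInsecureByCIDR("127.0.0.1:5000", []string{"127.0.0.0/8"}))      // true
	fmt.Println(isInsecureByCIDR("registry-1.docker.io", []string{"127.0.0.0/8"})) // false, assuming public resolution
}
```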
- for _, addr := range addrs { - for _, ipnet := range config.InsecureRegistryCIDRs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return false - } - } - } - - return true -} - -// ValidateMirror validates an HTTP(S) registry mirror -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", fmt.Errorf("%s is not a valid URI", val) - } - - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) - } - - if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { - return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") - } - - return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil -} - -// ValidateIndexName validates an index name. -func ValidateIndexName(val string) (string, error) { - if val == reference.LegacyDefaultHostname { - val = reference.DefaultHostname - } - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) - } - return val, nil -} - -func validateNoScheme(reposName string) error { - if strings.Contains(reposName, "://") { - // It cannot contain a scheme! - return ErrInvalidRepositoryName - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { - var err error - indexName, err = ValidateIndexName(indexName) - if err != nil { - return nil, err - } - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index, nil - } - - // Construct a non-configured index info. - index := ®istrytypes.IndexInfo{ - Name: indexName, - Mirrors: make([]string, 0), - Official: false, - } - index.Secure = isSecureIndex(config, indexName) - return index, nil -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. -func GetAuthConfigKey(index *registrytypes.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, name.Hostname()) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(name.Name(), '/') - return &RepositoryInfo{ - Named: name, - Index: index, - Official: official, - }, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. 
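
ValidateMirror above normalizes mirror flags to a bare `scheme://host/` form and rejects everything else. A standalone sketch of the same checks:

```go
package main

import (
	"fmt"
	"net/url"
)

// validateMirror accepts only http(s) URLs with no path, query, or
// fragment, and returns the normalized "scheme://host/" form.
func validateMirror(val string) (string, error) {
	uri, err := url.Parse(val)
	if err != nil {
		return "", fmt.Errorf("%s is not a valid URI", val)
	}
	if uri.Scheme != "http" && uri.Scheme != "https" {
		return "", fmt.Errorf("unsupported scheme %s", uri.Scheme)
	}
	if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" {
		return "", fmt.Errorf("unsupported path/query/fragment at end of the URI")
	}
	return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil
}

func main() {
	for _, m := range []string{"https://mirror.example.com", "ftp://bad.example.com"} {
		normalized, err := validateMirror(m)
		fmt.Println(m, "->", normalized, err)
	}
}
```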
-func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go deleted file mode 100644 index d692e8ef5..000000000 --- a/vendor/github.com/docker/docker/registry/config_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !windows - -package registry - -import ( - "github.com/spf13/pflag" -) - -var ( - // CertsDir is the directory where certificates are stored - CertsDir = "/etc/docker/certs.d" -) - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} - -// installCliPlatformFlags handles any platform specific flags for the service. -func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { - flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries") -} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index d1b313dc1..000000000 --- a/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package registry - -import ( - "os" - "path/filepath" - "strings" - - "github.com/spf13/pflag" -) - -// CertsDir is the directory where certificates are stored -var CertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} - -// installCliPlatformFlags handles any platform specific flags for the service. -func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { - // No Windows specific flags. -} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go deleted file mode 100644 index 6bcf8c935..000000000 --- a/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ /dev/null @@ -1,198 +0,0 @@ -package registry - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - registrytypes "github.com/docker/docker/api/types/registry" -) - -// V1Endpoint stores basic information about a V1 registry endpoint. -type V1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// NewV1Endpoint parses the given address to return a registry endpoint. 
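
The two cleanPath variants above exist because certs.d directory names are derived from host:port strings, and `:` is not a legal path character on Windows. A sketch of the Windows transformation (on Unix the input is returned unchanged):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// cleanPathWindows mirrors the Windows variant above: drop the colons a
// directory name cannot contain, then convert the path separators.
func cleanPathWindows(s string) string {
	return filepath.FromSlash(strings.Replace(s, ":", "", -1))
}

func main() {
	// A host:port certs.d entry such as "myregistry.example.com:5000"
	// becomes "myregistry.example.com5000" on Windows.
	fmt.Println(cleanPathWindows("myregistry.example.com:5000"))
}
```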
-func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *V1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - endpoint := &V1Endpoint{ - IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), - URL: new(url.URL), - } - - *endpoint.URL = address - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) - return endpoint, nil -} - -// trimV1Address trims the version off the address and returns the -// trimmed address or an error if there is a non-V1 version. 
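
validateEndpoint above implements the HTTPS-first policy: an insecure registry may fall back to plain HTTP, a secure one may not. A stdlib sketch of that decision, against a hypothetical host:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// pingWithFallback tries HTTPS first and only falls back to HTTP when the
// endpoint is marked insecure, reporting both errors if neither works.
func pingWithFallback(host string, insecure bool) (string, error) {
	client := &http.Client{Timeout: 10 * time.Second}
	httpsErr := ping(client, "https://"+host)
	if httpsErr == nil {
		return "https://" + host, nil
	}
	if !insecure {
		return "", fmt.Errorf("invalid registry endpoint %s: %v (add --insecure-registry to allow HTTP)", host, httpsErr)
	}
	if err := ping(client, "http://"+host); err != nil {
		return "", fmt.Errorf("HTTPS attempt: %v. HTTP attempt: %v", httpsErr, err)
	}
	return "http://" + host, nil
}

func ping(client *http.Client, base string) error {
	resp, err := client.Get(base + "/v1/_ping")
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}

func main() {
	// Hypothetical private registry.
	scheme, err := pingWithFallback("myregistry.example.com:5000", true)
	fmt.Println(scheme, err)
}
```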
-func trimV1Address(address string) (string, error) { - var ( - chunks []string - apiVersionStr string - ) - - if strings.HasSuffix(address, "/") { - address = address[:len(address)-1] - } - - chunks = strings.Split(address, "/") - apiVersionStr = chunks[len(chunks)-1] - if apiVersionStr == "v1" { - return strings.Join(chunks[:len(chunks)-1], "/"), nil - } - - for k, v := range apiVersions { - if k != APIVersion1 && apiVersionStr == v { - return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) - } - } - - return address, nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, err - } - - endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *V1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// Path returns a formatted string for the URL -// of this endpoint with the given path appended. -func (e *V1Endpoint) Path(path string) string { - return e.URL.String() + "/v1/" + path -} - -// Ping returns a PingResult which indicates whether the registry is standalone or not. -func (e *V1Endpoint) Ping() (PingResult, error) { - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return PingResult{Standalone: false}, nil - } - - req, err := http.NewRequest("GET", e.Path("_ping"), nil) - if err != nil { - return PingResult{Standalone: false}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{Standalone: false}, err - } - - defer resp.Body.Close() - - jsonString, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - logrus.Debugf("Registry version header: '%s'", hdr) - info.Version = hdr - } - logrus.Debugf("PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - logrus.Debugf("Registry standalone header: '%s'", standalone) - // Accepted values are "true" (case-insensitive) and "1". 
- if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go deleted file mode 100644 index 17fa97ce3..000000000 --- a/vendor/github.com/docker/docker/registry/registry.go +++ /dev/null @@ -1,191 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. -package registry - -import ( - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - // ErrAlreadyExists is an error returned if an image being pushed - // already exists on the remote side - ErrAlreadyExists = errors.New("Image already exists") -) - -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir != "" { - hostDir := filepath.Join(CertsDir, cleanPath(hostname)) - logrus.Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. -func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := ioutil.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return err - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return fmt.Errorf("unable to get system cert pool: %v", err) - } - tlsConfig.RootCAs = systemPool - } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return fmt.Errorf("Missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt.", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// DockerHeaders returns request modifiers with a User-Agent and metaHeaders -func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// HTTPClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func HTTPClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - -// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. 
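
trustedLocation and addRequiredHeadersToRedirectedRequests above combine into a redirect policy: propagate the original headers, but never leak Authorization to an untrusted host. A simplified stdlib sketch wired into http.Client.CheckRedirect; only the docker.io domain is treated as trusted here, and URL.Hostname() needs the Go 1.8 this PR moves to:

```go
package main

import (
	"net/http"
	"strings"
)

// copyHeadersOnRedirect carries the original request's headers forward on a
// redirect, dropping Authorization unless the target is a trusted HTTPS host.
func copyHeadersOnRedirect(req *http.Request, via []*http.Request) error {
	if len(via) == 0 {
		return nil
	}
	trusted := req.URL.Scheme == "https" &&
		(req.URL.Hostname() == "docker.io" || strings.HasSuffix(req.URL.Hostname(), ".docker.io"))
	for k, vals := range via[0].Header {
		if k == "Authorization" && !trusted {
			continue
		}
		for _, v := range vals {
			req.Header.Add(k, v)
		}
	}
	return nil
}

func main() {
	client := &http.Client{CheckRedirect: copyHeadersOnRedirect}
	_ = client // use client.Get(...) as usual; redirects now rewrite headers
}
```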
-func NewTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - base.Dial = proxyDialer.Dial - } - return base -} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go deleted file mode 100644 index 596a9c7e5..000000000 --- a/vendor/github.com/docker/docker/registry/service.go +++ /dev/null @@ -1,304 +0,0 @@ -package registry - -import ( - "crypto/tls" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/reference" -) - -const ( - // DefaultSearchLimit is the default value for maximum number of returned search results. - DefaultSearchLimit = 25 -) - -// Service is the interface defining what a registry service should implement. -type Service interface { - Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) - LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) - LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) - ResolveRepository(name reference.Named) (*RepositoryInfo, error) - Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) - ServiceConfig() *registrytypes.ServiceConfig - TLSConfig(hostname string) (*tls.Config, error) - LoadInsecureRegistries([]string) error -} - -// DefaultService is a registry service. It tracks configuration data such as a list -// of mirrors. -type DefaultService struct { - config *serviceConfig - mu sync.Mutex -} - -// NewService returns a new instance of DefaultService ready to be -// installed into an engine. -func NewService(options ServiceOptions) *DefaultService { - return &DefaultService{ - config: newServiceConfig(options), - } -} - -// ServiceConfig returns the public registry service configuration. -func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { - s.mu.Lock() - defer s.mu.Unlock() - - servConfig := registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), - IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), - Mirrors: make([]string, 0), - } - - // construct a new ServiceConfig which will not retrieve s.Config directly, - // and look up items in s.config with mu locked - servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) - - for key, value := range s.config.ServiceConfig.IndexConfigs { - servConfig.IndexConfigs[key] = value - } - - servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) 
- - return &servConfig -} - -// LoadInsecureRegistries loads insecure registries for Service -func (s *DefaultService) LoadInsecureRegistries(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadInsecureRegistries(registries) -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. -func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { - // TODO Use ctx when searching for repositories - serverAddress := authConfig.ServerAddress - if serverAddress == "" { - serverAddress = IndexServer - } - if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { - serverAddress = "https://" + serverAddress - } - u, err := url.Parse(serverAddress) - if err != nil { - return "", "", fmt.Errorf("unable to parse server address: %v", err) - } - - endpoints, err := s.LookupPushEndpoints(u.Host) - if err != nil { - return "", "", err - } - - for _, endpoint := range endpoints { - login := loginV2 - if endpoint.Version == APIVersion1 { - login = loginV1 - } - - status, token, err = login(authConfig, endpoint, userAgent) - if err == nil { - return - } - if fErr, ok := err.(fallbackError); ok { - err = fErr.err - logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) - continue - } - return "", "", err - } - - return "", "", err -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - var indexName, remoteName string - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Index repos (ex: samalba/hipache or ubuntu) - // 'docker.io' - indexName = IndexName - remoteName = reposName - } else { - indexName = nameParts[0] - remoteName = nameParts[1] - } - return indexName, remoteName -} - -// Search queries the public registry for images matching the specified -// search terms, and returns the results. -func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - // TODO Use ctx when searching for repositories - if err := validateNoScheme(term); err != nil { - return nil, err - } - - indexName, remoteName := splitReposSearchTerm(term) - - // Search is a long-running operation, just lock s.config to avoid block others. - s.mu.Lock() - index, err := newIndexInfo(s.config, indexName) - s.mu.Unlock() - - if err != nil { - return nil, err - } - - // *TODO: Search multiple indexes. 
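
splitReposSearchTerm here decides whether a search term names an explicit index or belongs to Docker Hub, using the same host-shaped-first-segment rule as the reference parser. A standalone sketch of the split, with the default index name inlined:

```go
package main

import (
	"fmt"
	"strings"
)

// splitReposSearchTerm treats the first path segment as an index name only
// if it looks like a host ("." or ":" present, or exactly "localhost").
func splitReposSearchTerm(reposName string) (string, string) {
	parts := strings.SplitN(reposName, "/", 2)
	if len(parts) == 1 || (!strings.Contains(parts[0], ".") &&
		!strings.Contains(parts[0], ":") && parts[0] != "localhost") {
		return "docker.io", reposName
	}
	return parts[0], parts[1]
}

func main() {
	for _, term := range []string{"ubuntu", "samalba/hipache", "myregistry.example.com:5000/app"} {
		index, remote := splitReposSearchTerm(term)
		fmt.Printf("%-35s -> index=%s remote=%s\n", term, index, remote)
	}
}
```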
- endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - scopes := []auth.Scope{ - auth.RegistryScope{ - Name: "catalog", - Actions: []string{"search"}, - }, - } - - modifiers := DockerHeaders(userAgent, nil) - v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) - if err != nil { - if fErr, ok := err.(fallbackError); ok { - logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) - } else { - return nil, err - } - } else if foundV2 { - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - logrus.Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } - } - - if client == nil { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - r := newSession(client, authConfig, endpoint) - - if index.Official { - localName := remoteName - if strings.HasPrefix(localName, "library/") { - // If pull "library/foo", it's stored locally under "foo" - localName = strings.SplitN(localName, "/", 2)[1] - } - - return r.SearchRepositories(localName, limit) - } - return r.SearchRepositories(remoteName, limit) -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. -func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - s.mu.Lock() - defer s.mu.Unlock() - return newRepositoryInfo(s.config, name) -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - Official bool - TrimHostname bool - TLSConfig *tls.Config -} - -// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) -} - -// TLSConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { - s.mu.Lock() - defer s.mu.Unlock() - - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -// tlsConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { - return s.tlsConfig(mirrorURL.Host) -} - -// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. -// It gives preference to v2 endpoints over v1, mirrors over the actual -// registry, and HTTPS over plain HTTP. -func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - return s.lookupEndpoints(hostname) -} - -// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. -// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. -// Mirrors are not included. 
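
LookupPushEndpoints below reuses the pull list but drops mirrors, since pushes must reach the authoritative registry. A toy sketch of that filter over a trimmed stand-in type:

```go
package main

import "fmt"

// apiEndpoint is a trimmed stand-in for APIEndpoint, just enough to show
// how push lookups reuse the pull list while dropping mirrors.
type apiEndpoint struct {
	URL    string
	Mirror bool
}

func pushEndpoints(all []apiEndpoint) (out []apiEndpoint) {
	for _, e := range all {
		if !e.Mirror {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	pull := []apiEndpoint{
		{URL: "https://mirror.example.com", Mirror: true},
		{URL: "https://registry-1.docker.io"},
	}
	fmt.Println(pushEndpoints(pull)) // only the non-mirror endpoint remains
}
```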
-func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - allEndpoints, err := s.lookupEndpoints(hostname) - if err == nil { - for _, endpoint := range allEndpoints { - if !endpoint.Mirror { - endpoints = append(endpoints, endpoint) - } - } - } - return endpoints, err -} - -func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - endpoints, err = s.lookupV2Endpoints(hostname) - if err != nil { - return nil, err - } - - if s.config.V2Only { - return endpoints, nil - } - - legacyEndpoints, err := s.lookupV1Endpoints(hostname) - if err != nil { - return nil, err - } - endpoints = append(endpoints, legacyEndpoints...) - - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go deleted file mode 100644 index 1d251aec6..000000000 --- a/vendor/github.com/docker/docker/registry/service_v1.go +++ /dev/null @@ -1,40 +0,0 @@ -package registry - -import "net/url" - -func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { - return []APIEndpoint{}, nil - } - - tlsConfig, err := s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ // or this - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go deleted file mode 100644 index 228d745f8..000000000 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ /dev/null @@ -1,78 +0,0 @@ -package registry - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - tlsConfig := tlsconfig.ServerDefault() - if hostname == DefaultNamespace || hostname == IndexHostname { - // v2 mirrors - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, err - } - mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - // guess mirrors are v2 - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - // v2 registry - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - - return endpoints, nil - } - - tlsConfig, err = s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = 
append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion2, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go deleted file mode 100644 index 72e286ab4..000000000 --- a/vendor/github.com/docker/docker/registry/session.go +++ /dev/null @@ -1,783 +0,0 @@ -package registry - -import ( - "bytes" - "crypto/sha256" - "errors" - "sync" - // this is required for some certificates - _ "crypto/sha512" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/cookiejar" - "net/url" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/reference" -) - -var ( - // ErrRepoNotFound is returned if the repository didn't exist on the - // remote side - ErrRepoNotFound = errors.New("Repository not found") -) - -// A Session is used to communicate with a V1 registry -type Session struct { - indexEndpoint *V1Endpoint - client *http.Client - // TODO(tiborvass): remove authConfig - authConfig *types.AuthConfig - id string -} - -type authTransport struct { - http.RoundTripper - *types.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - delete(tr.modReq, orig) - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.Ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. 
- client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errors.New("cookiejar.New is not supposed to return an error") - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { - return &Session{ - authConfig: authConfig, - client: client, - indexEndpoint: endpoint, - id: stringid.GenerateRandomID(), - } -} - -// NewSession creates a new session -// TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - - return newSession(client, authConfig, endpoint), nil -} - -// ID returns this registry session's ID. -func (r *Session) ID() string { - return r.id -} - -// GetRemoteHistory retrieves the history of a given image from the registry. -// It returns a list of the parent's JSON files (including the requested image). -func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) - } - - var history []string - if err := json.NewDecoder(res.Body).Decode(&history); err != nil { - return nil, fmt.Errorf("Error while reading the http response: %v", err) - } - - logrus.Debugf("Ancestry: %v", history) - return history, nil -} - -// LookupRemoteImage checks if an image exists in the registry -func (r *Session) LookupRemoteImage(imgID, registry string) error { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return err - } - res.Body.Close() - if res.StatusCode != 200 { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - return nil -} - -// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
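The `AuthTransport` decorator and `cloneRequest` helper above follow a standard Go pattern: a `RoundTripper` must never mutate the caller's request, so it shallow-copies the struct and deep-copies the header map before setting credentials, and the client gets a cookie jar only after the transport is wrapped. A minimal std-lib sketch of the same idea; `basicAuthTransport` and `newV1Client` are illustrative names, not the vendored API:

```go
package main

import (
	"net/http"
	"net/http/cookiejar"
)

// basicAuthTransport is an illustrative stand-in for the AuthTransport
// decorator: it never mutates the caller's request.
type basicAuthTransport struct {
	base               http.RoundTripper
	username, password string
}

func (t *basicAuthTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
	// Shallow-copy the struct, deep-copy the header map: the same
	// cloneRequest approach shown above.
	req := new(http.Request)
	*req = *orig
	req.Header = make(http.Header, len(orig.Header))
	for k, v := range orig.Header {
		req.Header[k] = append([]string(nil), v...)
	}
	req.SetBasicAuth(t.username, t.password)
	return t.base.RoundTrip(req)
}

// newV1Client mirrors the authorizeClient sequence: wrap the transport
// first, then attach a cookie jar so session cookies persist.
func newV1Client(username, password string) (*http.Client, error) {
	jar, err := cookiejar.New(nil) // nil options never fail in practice
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &basicAuthTransport{base: http.DefaultTransport, username: username, password: password},
		Jar:       jar,
	}, nil
}

func main() {
	client, err := newV1Client("user", "secret")
	if err != nil {
		panic(err)
	}
	_ = client // ready for authenticated requests against a v1 endpoint
}
```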
-func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return nil, -1, fmt.Errorf("Failed to download json: %s", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - // if the size header is not present, then set it to '-1' - imageSize := int64(-1) - if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { - imageSize, err = strconv.ParseInt(hdr, 10, 64) - if err != nil { - return nil, -1, err - } - } - - jsonString, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) - } - return jsonString, imageSize, nil -} - -// GetRemoteImageLayer retrieves an image layer from the registry -func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { - var ( - statusCode = 0 - res *http.Response - err error - imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) - ) - - req, err := http.NewRequest("GET", imageURL, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - statusCode = 0 - res, err = r.client.Do(req) - if err != nil { - logrus.Debugf("Error contacting registry %s: %v", registry, err) - // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 - if res != nil { - if res.Body != nil { - res.Body.Close() - } - statusCode = res.StatusCode - } - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - statusCode, imgID) - } - - if res.StatusCode != 200 { - res.Body.Close() - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - res.StatusCode, imgID) - } - - if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { - logrus.Debug("server supports resume") - return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil - } - logrus.Debug("server doesn't support resume") - return res.Body, nil -} - -// GetRemoteTag retrieves the tag named in the askedTag argument from the given -// repository. It queries each of the registries supplied in the registries -// argument, and returns data from the first one that answers the query -// successfully. -func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { - repository := repositoryRef.RemoteName() - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) - res, err := r.client.Get(endpoint) - if err != nil { - return "", err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return "", ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - var tagID string - if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { - return "", err - } - return tagID, nil - } - return "", fmt.Errorf("Could not reach any registry endpoint") -} - -// GetRemoteTags retrieves all tags from the given repository. 
It queries each -// of the registries supplied in the registries argument, and returns data from -// the first one that answers the query successfully. It returns a map with -// tag names as the keys and image IDs as the values. -func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { - repository := repositoryRef.RemoteName() - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) - res, err := r.client.Get(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return nil, ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - result := make(map[string]string) - if err := json.NewDecoder(res.Body).Decode(&result); err != nil { - return nil, err - } - return result, nil - } - return nil, fmt.Errorf("Could not reach any registry endpoint") -} - -func buildEndpointsList(headers []string, indexEp string) ([]string, error) { - var endpoints []string - parsedURL, err := url.Parse(indexEp) - if err != nil { - return nil, err - } - var urlScheme = parsedURL.Scheme - // The registry's URL scheme has to match the Index' - for _, ep := range headers { - epList := strings.Split(ep, ",") - for _, epListElement := range epList { - endpoints = append( - endpoints, - fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) - } - } - return endpoints, nil -} - -// GetRepositoryData returns lists of images and endpoints for the repository -func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName()) - - logrus.Debugf("[registry] Calling GET %s", repositoryTarget) - - req, err := http.NewRequest("GET", repositoryTarget, nil) - if err != nil { - return nil, err - } - // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - // check if the error is because of i/o timeout - // and return a non-obtuse error message for users - // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" - // was a top search on the docker user forum - if isTimeout(err) { - return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) - } - return nil, fmt.Errorf("Error while pulling image: %v", err) - } - defer res.Body.Close() - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - // TODO: Right now we're ignoring checksums in the response body. - // In the future, we need to use them to check image validity. 
- if res.StatusCode == 404 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) - } else if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) - } - - var endpoints []string - if res.Header.Get("X-Docker-Endpoints") != "" { - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) - if err != nil { - return nil, err - } - } else { - // Assume the endpoint is on the same host - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) - } - - remoteChecksums := []*ImgData{} - if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { - return nil, err - } - - // Forge a better object from the retrieved data - imgsData := make(map[string]*ImgData, len(remoteChecksums)) - for _, elem := range remoteChecksums { - imgsData[elem.ID] = elem - } - - return &RepositoryData{ - ImgList: imgsData, - Endpoints: endpoints, - }, nil -} - -// PushImageChecksumRegistry uploads checksums for an image -func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { - u := registry + "images/" + imgData.ID + "/checksum" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, nil) - if err != nil { - return err - } - req.Header.Set("X-Docker-Checksum", imgData.Checksum) - req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %v", err) - } - defer res.Body.Close() - if len(res.Cookies()) > 0 { - r.client.Jar.SetCookies(req.URL, res.Cookies()) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) - } - return nil -} - -// PushImageJSONRegistry pushes JSON metadata for a local image to the registry -func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { - - u := registry + "images/" + imgData.ID + "/json" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) - if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %s", err) - } - defer res.Body.Close() - if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - var jsonBody map[string]string - if err := 
json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) - } - return nil -} - -// PushImageLayerRegistry sends the checksum of an image layer to the registry -func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { - u := registry + "images/" + imgID + "/layer" - - logrus.Debugf("[registry] Calling PUT %s", u) - - tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) - if err != nil { - return "", "", err - } - h := sha256.New() - h.Write(jsonRaw) - h.Write([]byte{'\n'}) - checksumLayer := io.TeeReader(tarsumLayer, h) - - req, err := http.NewRequest("PUT", u, checksumLayer) - if err != nil { - return "", "", err - } - req.Header.Add("Content-Type", "application/octet-stream") - req.ContentLength = -1 - req.TransferEncoding = []string{"chunked"} - res, err := r.client.Do(req) - if err != nil { - return "", "", fmt.Errorf("Failed to upload layer: %v", err) - } - if rc, ok := layer.(io.Closer); ok { - if err := rc.Close(); err != nil { - return "", "", err - } - } - defer res.Body.Close() - - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) - } - - checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) - return tarsumLayer.Sum(jsonRaw), checksumPayload, nil -} - -// PushRegistryTag pushes a tag on the registry. 
-// Remote has the format '<user>/<repo>' -func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { - // "jsonify" the string - revision = "\"" + revision + "\"" - path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) - - req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) - if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - req.ContentLength = int64(len(revision)) - res, err := r.client.Do(req) - if err != nil { - return err - } - res.Body.Close() - if res.StatusCode != 200 && res.StatusCode != 201 { - return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) - } - return nil -} - -// PushImageJSONIndex uploads an image list to the repository -func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { - cleanImgList := []*ImgData{} - if validate { - for _, elem := range imgList { - if elem.Checksum != "" { - cleanImgList = append(cleanImgList, elem) - } - } - } else { - cleanImgList = imgList - } - - imgListJSON, err := json.Marshal(cleanImgList) - if err != nil { - return nil, err - } - var suffix string - if validate { - suffix = "images" - } - u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix) - logrus.Debugf("[registry] PUT %s", u) - logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) - headers := map[string][]string{ - "Content-type": {"application/json"}, - // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests - "X-Docker-Token": {"true"}, - } - if validate { - headers["X-Docker-Endpoints"] = regs - } - - // Redirect if necessary - var res *http.Response - for { - if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { - return nil, err - } - if !shouldRedirect(res) { - break - } - res.Body.Close() - u = res.Header.Get("Location") - logrus.Debugf("Redirected to %s", u) - } - defer res.Body.Close() - - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - - var tokens, endpoints []string - if !validate { - if res.StatusCode != 200 && res.StatusCode != 201 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) - } - tokens = res.Header["X-Docker-Token"] - logrus.Debugf("Auth token: %v", tokens) - - if res.Header.Get("X-Docker-Endpoints") == "" { - return nil, fmt.Errorf("Index response didn't contain any endpoints") - } - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) - if err != nil { - return nil, err - } - } else { - if res.StatusCode != 204 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) - } - } - - return &RepositoryData{ - Endpoints: endpoints, - }, nil -} - -func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { - req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
- if err != nil { - return nil, err - } - req.ContentLength = int64(len(body)) - for k, v := range headers { - req.Header[k] = v - } - response, err := r.client.Do(req) - if err != nil { - return nil, err - } - return response, nil -} - -func shouldRedirect(response *http.Response) bool { - return response.StatusCode >= 300 && response.StatusCode < 400 -} - -// SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { - if limit < 1 || limit > 100 { - return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) - } - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - // Have the AuthTransport send authentication, when logged in. - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) - } - result := new(registrytypes.SearchResults) - return result, json.NewDecoder(res.Body).Decode(result) -} - -// GetAuthConfig returns the authentication settings for a session -// TODO(tiborvass): remove this once registry client v2 is vendored -func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { - password := "" - if withPasswd { - password = r.authConfig.Password - } - return &types.AuthConfig{ - Username: r.authConfig.Username, - Password: password, - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go deleted file mode 100644 index 49c123a3e..000000000 --- a/vendor/github.com/docker/docker/registry/types.go +++ /dev/null @@ -1,73 +0,0 @@ -package registry - -import ( - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/reference" -) - -// RepositoryData tracks the image list, list of endpoints, and list of tokens -// for a repository -type RepositoryData struct { - // ImgList is a list of images in the repository - ImgList map[string]*ImgData - // Endpoints is a list of endpoints returned in X-Docker-Endpoints - Endpoints []string - // Tokens is currently unused (remove it?) - Tokens []string -} - -// ImgData is used to transfer image checksums to and from the registry -type ImgData struct { - // ID is an opaque string that identifies the image - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -// PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. 
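The `isTimeout` helper above leans on Go structural typing: `http.Client` wraps failures in `*url.Error`, and the underlying network error exposes a `Timeout() bool` method that can be checked through an anonymous interface. A runnable restatement, with a hypothetical `timeoutError` standing in for a real network failure:

```go
package main

import (
	"fmt"
	"net/url"
)

// timeoutError is a hypothetical error type used only to exercise the check.
type timeoutError struct{}

func (timeoutError) Error() string { return "i/o timeout" }
func (timeoutError) Timeout() bool { return true }

// isTimeout restates the helper above: unwrap the *url.Error that
// http.Client returns, then look for a Timeout() method on whatever
// error sits underneath.
func isTimeout(err error) bool {
	type timeout interface {
		Timeout() bool
	}
	e := err
	if urlErr, ok := err.(*url.Error); ok {
		e = urlErr.Err
	}
	t, ok := e.(timeout)
	return ok && t.Timeout()
}

func main() {
	err := &url.Error{Op: "Get", URL: "https://index.docker.io/v1/", Err: timeoutError{}}
	fmt.Println(isTimeout(err))                       // true
	fmt.Println(isTimeout(fmt.Errorf("plain error"))) // false
}
```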
-type PingResult struct { - // Version is the registry version supplied by the registry in an HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -// API Version identifiers. -const ( - _ = iota - APIVersion1 APIVersion = iota - APIVersion2 -) - -var apiVersions = map[APIVersion]string{ - APIVersion1: "v1", - APIVersion2: "v2", -} - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - reference.Named - // Index points to registry information - Index *registrytypes.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - Official bool - // Class represents the class of the repository, such as "plugin" - // or "image". - Class string -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/envfile.go b/vendor/github.com/docker/docker/runconfig/opts/envfile.go deleted file mode 100644 index f72379921..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/envfile.go +++ /dev/null @@ -1,81 +0,0 @@ -package opts - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - "unicode" - "unicode/utf8" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// ``Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase -// letters, digits, and the '_' (underscore) from the characters defined in -// Portable Character Set and do not begin with a digit. *But*, other -// characters may be permitted by an implementation; applications shall -// tolerate the presence of such names.'' -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html -// -// As of #16585, it's up to application inside docker to validate or not -// environment variables, that's why we just strip leading whitespace and -// nothing more. 
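`ParseEnvFile` below applies exactly the rules its comment describes: strip a UTF-8 BOM from the first line, trim only leading whitespace, skip blanks and `#` comments, and split on the first `=` so values pass through untouched. A condensed std-lib sketch of those rules; `parseEnv` is an illustrative name, not the vendored function:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
	"unicode"
)

// parseEnv is a stripped-down sketch of the ParseEnvFile logic below.
func parseEnv(r io.Reader) ([]string, error) {
	utf8bom := []byte{0xEF, 0xBB, 0xBF}
	var out []string
	sc := bufio.NewScanner(r)
	for first := true; sc.Scan(); first = false {
		b := sc.Bytes()
		if first {
			b = bytes.TrimPrefix(b, utf8bom) // BOM only on the first line
		}
		line := strings.TrimLeftFunc(string(b), unicode.IsSpace)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		kv := strings.SplitN(line, "=", 2) // first '=' only
		if len(kv) == 2 {
			out = append(out, kv[0]+"="+kv[1]) // value passed through untrimmed
		} else {
			out = append(out, line) // bare name: resolved from os.Environ in the real code
		}
	}
	return out, sc.Err()
}

func main() {
	vars, err := parseEnv(strings.NewReader("# comment\nFOO=bar=baz\n  QUX=1\nHOME\n"))
	fmt.Println(vars, err) // [FOO=bar=baz QUX=1 HOME] <nil>
}
```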
-func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - currentLine := 0 - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - if !utf8.Valid(scannedBytes) { - return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) - } - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - // trim the line from all leading whitespace first - line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. - lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable typed error for bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/opts.go b/vendor/github.com/docker/docker/runconfig/opts/opts.go deleted file mode 100644 index cae0432f0..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/opts.go +++ /dev/null @@ -1,83 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "os" - "runtime" - "strings" - - fopts "github.com/docker/docker/opts" -) - -// ValidateAttach validates that the specified string is a valid attach option. -func ValidateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") -} - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it returns the current value using os.Getenv. -// -// As on ParseEnvFile and related to #16585, environment variable names -// are not validate what so ever, it's up to application inside docker -// to validate them or not. -// -// The only validation here is to check if name is empty, per #25099 -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if arr[0] == "" { - return "", fmt.Errorf("invalid environment variable: %s", val) - } - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if runtime.GOOS == "windows" { - // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. 
- if strings.EqualFold(parts[0], name) { - return true - } - } - if parts[0] == name { - return true - } - } - return false -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go b/vendor/github.com/docker/docker/runconfig/opts/parse.go deleted file mode 100644 index 71a89277e..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/parse.go +++ /dev/null @@ -1,995 +0,0 @@ -package opts - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "path" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/signal" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" - "github.com/spf13/pflag" -) - -// ContainerOptions is a data object with all the options for creating a container -type ContainerOptions struct { - attach opts.ListOpts - volumes opts.ListOpts - tmpfs opts.ListOpts - blkioWeightDevice WeightdeviceOpt - deviceReadBps ThrottledeviceOpt - deviceWriteBps ThrottledeviceOpt - links opts.ListOpts - aliases opts.ListOpts - linkLocalIPs opts.ListOpts - deviceReadIOps ThrottledeviceOpt - deviceWriteIOps ThrottledeviceOpt - env opts.ListOpts - labels opts.ListOpts - devices opts.ListOpts - ulimits *UlimitOpt - sysctls *opts.MapOpts - publish opts.ListOpts - expose opts.ListOpts - dns opts.ListOpts - dnsSearch opts.ListOpts - dnsOptions opts.ListOpts - extraHosts opts.ListOpts - volumesFrom opts.ListOpts - envFile opts.ListOpts - capAdd opts.ListOpts - capDrop opts.ListOpts - groupAdd opts.ListOpts - securityOpt opts.ListOpts - storageOpt opts.ListOpts - labelsFile opts.ListOpts - loggingOpts opts.ListOpts - privileged bool - pidMode string - utsMode string - usernsMode string - publishAll bool - stdin bool - tty bool - oomKillDisable bool - oomScoreAdj int - containerIDFile string - entrypoint string - hostname string - memoryString string - memoryReservation string - memorySwap string - kernelMemory string - user string - workingDir string - cpuCount int64 - cpuShares int64 - cpuPercent int64 - cpuPeriod int64 - cpuRealtimePeriod int64 - cpuRealtimeRuntime int64 - cpuQuota int64 - cpus opts.NanoCPUs - cpusetCpus string - cpusetMems string - blkioWeight uint16 - ioMaxBandwidth string - ioMaxIOps uint64 - swappiness int64 - netMode string - macAddress string - ipv4Address string - ipv6Address string - ipcMode string - pidsLimit int64 - restartPolicy string - readonlyRootfs bool - loggingDriver string - cgroupParent string - volumeDriver string - stopSignal string - stopTimeout int - isolation string - shmSize string - 
noHealthcheck bool - healthCmd string - healthInterval time.Duration - healthTimeout time.Duration - healthRetries int - runtime string - autoRemove bool - init bool - initPath string - credentialSpec string - - Image string - Args []string -} - -// AddFlags adds all command line flags that will be used by Parse to the FlagSet -func AddFlags(flags *pflag.FlagSet) *ContainerOptions { - copts := &ContainerOptions{ - aliases: opts.NewListOpts(nil), - attach: opts.NewListOpts(ValidateAttach), - blkioWeightDevice: NewWeightdeviceOpt(ValidateWeightDevice), - capAdd: opts.NewListOpts(nil), - capDrop: opts.NewListOpts(nil), - dns: opts.NewListOpts(opts.ValidateIPAddress), - dnsOptions: opts.NewListOpts(nil), - dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), - deviceReadBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - deviceReadIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - deviceWriteBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - deviceWriteIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - devices: opts.NewListOpts(ValidateDevice), - env: opts.NewListOpts(ValidateEnv), - envFile: opts.NewListOpts(nil), - expose: opts.NewListOpts(nil), - extraHosts: opts.NewListOpts(ValidateExtraHost), - groupAdd: opts.NewListOpts(nil), - labels: opts.NewListOpts(ValidateEnv), - labelsFile: opts.NewListOpts(nil), - linkLocalIPs: opts.NewListOpts(nil), - links: opts.NewListOpts(ValidateLink), - loggingOpts: opts.NewListOpts(nil), - publish: opts.NewListOpts(nil), - securityOpt: opts.NewListOpts(nil), - storageOpt: opts.NewListOpts(nil), - sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), - tmpfs: opts.NewListOpts(nil), - ulimits: NewUlimitOpt(nil), - volumes: opts.NewListOpts(nil), - volumesFrom: opts.NewListOpts(nil), - } - - // General purpose flags - flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") - flags.Var(&copts.devices, "device", "Add a host device to the container") - flags.VarP(&copts.env, "env", "e", "Set environment variables") - flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") - flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") - flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") - flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") - flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") - flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") - flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") - flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") - flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") - flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) - flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") - flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) - flags.Var(copts.sysctls, "sysctl", "Sysctl options") - flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") - flags.Var(copts.ulimits, "ulimit", "Ulimit options") - flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])") - flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container") -
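The repeatable flags registered above (`--env`, `--label`, `--device`, ...) work because `opts.ListOpts` and friends implement the `pflag.Value` interface, whose `Set` is invoked once per occurrence on the command line. A minimal sketch of that mechanism, with `listValue` as an illustrative type:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

// listValue is a minimal pflag.Value: Set is called once per flag
// occurrence, which is what makes -e repeatable.
type listValue struct{ vals []string }

func (l *listValue) String() string     { return strings.Join(l.vals, ",") }
func (l *listValue) Set(v string) error { l.vals = append(l.vals, v); return nil }
func (l *listValue) Type() string       { return "list" }

func main() {
	flags := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	env := &listValue{}
	flags.VarP(env, "env", "e", "Set environment variables")
	if err := flags.Parse([]string{"-e", "A=1", "-e", "B=2"}); err != nil {
		panic(err)
	}
	fmt.Println(env.vals) // [A=1 B=2]
}
```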
flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") - - // Security - flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") - flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") - flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") - flags.Var(&copts.securityOpt, "security-opt", "Security Options") - flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") - flags.StringVar(&copts.credentialSpec, "credentialspec", "", "Credential spec for managed service account (Windows only)") - - // Network and port publishing flag - flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") - flags.Var(&copts.dns, "dns", "Set custom DNS servers") - // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. - // This is to be consistent with service create/update - flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") - flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") - flags.MarkHidden("dns-opt") - flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") - flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") - flags.StringVar(&copts.ipv4Address, "ip", "", "Container IPv4 address (e.g. 172.30.100.104)") - flags.StringVar(&copts.ipv6Address, "ip6", "", "Container IPv6 address (e.g. 2001:db8::33)") - flags.Var(&copts.links, "link", "Add link to another container") - flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") - flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") - flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") - flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") - // We allow for both "--net" and "--network", although the latter is the recommended way. - flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network") - flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network") - flags.MarkHidden("net") - // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. 
- flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") - flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") - flags.MarkHidden("net-alias") - - // Logging and storage - flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") - flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") - flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") - flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") - flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") - flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") - flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") - - // Health-checking - flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") - flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ns|us|ms|s|m|h) (default 0s)") - flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") - flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)") - flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") - - // Resource management - flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") - flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") - flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") - flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") - flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") - flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") - flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") - flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Var(&copts.cpus, "cpus", "Number of CPUs") - flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") - flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") - flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") - flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") - flags.StringVar(&copts.ioMaxBandwidth, "io-maxbandwidth", "", "Maximum IO bandwidth limit for the system drive (Windows only)") - flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") - flags.StringVar(&copts.kernelMemory, "kernel-memory", "", "Kernel memory limit") - flags.StringVarP(&copts.memoryString, "memory", "m", "", "Memory limit") - 
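The memory-related flags above are deliberately plain strings; `Parse` converts them later with `units.RAMInBytes` from `github.com/docker/go-units`, which understands 1024-based human-readable suffixes. For example:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// RAMInBytes accepts the same spellings as the flags above.
	for _, s := range []string{"64m", "2g", "512"} {
		n, err := units.RAMInBytes(s)
		if err != nil {
			fmt.Println("bad size:", s, err)
			continue
		}
		fmt.Printf("%s = %d bytes\n", s, n) // 64m = 67108864, 2g = 2147483648, 512 = 512
	}
}
```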
flags.StringVar(&copts.memoryReservation, "memory-reservation", "", "Memory soft limit") - flags.StringVar(&copts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") - flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") - flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") - flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") - - // Low-level execution (cgroups, namespaces, ...) - flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use") - flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") - flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") - flags.StringVar(&copts.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") - flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") - flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") - - flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") - flags.StringVar(&copts.initPath, "init-path", "", "Path to the docker-init binary") - return copts -} - -// Parse parses the args for the specified command and generates a Config, -// a HostConfig and returns them with the specified command. -// If the specified args are not valid, it will return an error. -func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var ( - attachStdin = copts.attach.Get("stdin") - attachStdout = copts.attach.Get("stdout") - attachStderr = copts.attach.Get("stderr") - ) - - // Validate the input mac address - if copts.macAddress != "" { - if _, err := ValidateMACAddress(copts.macAddress); err != nil { - return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.macAddress) - } - } - if copts.stdin { - attachStdin = true - } - // If -a is not set, attach to stdout and stderr - if copts.attach.Len() == 0 { - attachStdout = true - attachStderr = true - } - - var err error - - var memory int64 - if copts.memoryString != "" { - memory, err = units.RAMInBytes(copts.memoryString) - if err != nil { - return nil, nil, nil, err - } - } - - var memoryReservation int64 - if copts.memoryReservation != "" { - memoryReservation, err = units.RAMInBytes(copts.memoryReservation) - if err != nil { - return nil, nil, nil, err - } - } - - var memorySwap int64 - if copts.memorySwap != "" { - if copts.memorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(copts.memorySwap) - if err != nil { - return nil, nil, nil, err - } - } - } - - var kernelMemory int64 - if copts.kernelMemory != "" { - kernelMemory, err = units.RAMInBytes(copts.kernelMemory) - if err != nil { - return nil, nil, nil, err - } - } - - swappiness := copts.swappiness - if swappiness != -1 && (swappiness < 0 || swappiness > 100) { - return nil, nil, nil, fmt.Errorf("invalid value: %d. 
Valid memory swappiness range is 0-100", swappiness) - } - - var shmSize int64 - if copts.shmSize != "" { - shmSize, err = units.RAMInBytes(copts.shmSize) - if err != nil { - return nil, nil, nil, err - } - } - - // TODO FIXME units.RAMInBytes should have a uint64 version - var maxIOBandwidth int64 - if copts.ioMaxBandwidth != "" { - maxIOBandwidth, err = units.RAMInBytes(copts.ioMaxBandwidth) - if err != nil { - return nil, nil, nil, err - } - if maxIOBandwidth < 0 { - return nil, nil, nil, fmt.Errorf("invalid value: %s. Maximum IO Bandwidth must be positive", copts.ioMaxBandwidth) - } - } - - var binds []string - volumes := copts.volumes.GetMap() - // add any bind targets to the list of container volumes - for bind := range copts.volumes.GetMap() { - if arr := volumeSplitN(bind, 2); len(arr) > 1 { - // after creating the bind mount we want to delete it from the copts.volumes values because - // we do not want bind mounts being committed to image configs - binds = append(binds, bind) - // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if - // there are duplicates entries. - delete(volumes, bind) - } - } - - // Can't evaluate options passed into --tmpfs until we actually mount - tmpfs := make(map[string]string) - for _, t := range copts.tmpfs.GetAll() { - if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { - tmpfs[arr[0]] = arr[1] - } else { - tmpfs[arr[0]] = "" - } - } - - var ( - runCmd strslice.StrSlice - entrypoint strslice.StrSlice - ) - - if len(copts.Args) > 0 { - runCmd = strslice.StrSlice(copts.Args) - } - - if copts.entrypoint != "" { - entrypoint = strslice.StrSlice{copts.entrypoint} - } else if flags.Changed("entrypoint") { - // if `--entrypoint=` is parsed then Entrypoint is reset - entrypoint = []string{""} - } - - ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Merge in exposed ports to the map of published ports - for _, e := range copts.expose.GetAll() { - if strings.Contains(e, ":") { - return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e) - } - //support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>] - proto, port := nat.SplitProtoPort(e) - //parse the start and end port and create a sequence of ports to expose - //if expose a port, the start and end port are the same - start, end, err := nat.ParsePortRange(port) - if err != nil { - return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) - } - for i := start; i <= end; i++ { - p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) - if err != nil { - return nil, nil, nil, err - } - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} - } - } - } - - // parse device mappings - deviceMappings := []container.DeviceMapping{} - for _, device := range copts.devices.GetAll() { - deviceMapping, err := ParseDevice(device) - if err != nil { - return nil, nil, nil, err - } - deviceMappings = append(deviceMappings, deviceMapping) - } - - // collect all the environment variables for the container - envVariables, err := ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // collect all the labels for the container - labels, err := ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - ipcMode := container.IpcMode(copts.ipcMode) - if !ipcMode.Valid() { - return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode") - } - - 
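The `--publish` and `--expose` handling above funnels through `github.com/docker/go-connections/nat`: `ParsePortSpecs` produces both the exposed-port set and the host bindings that later land in `config.ExposedPorts` and `hostConfig.PortBindings`. A standalone sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// One bound port and one merely exposed port, the same specs a
	// `-p 8080:80/tcp -p 53/udp` command line would produce.
	exposed, bindings, err := nat.ParsePortSpecs([]string{"8080:80/tcp", "53/udp"})
	if err != nil {
		panic(err)
	}
	for port := range exposed {
		fmt.Println("exposed:", port) // nat.Port values such as "80/tcp"
	}
	for port, b := range bindings {
		fmt.Println("bindings:", port, b) // host IP/port pairs, empty when unbound
	}
}
```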
pidMode := container.PidMode(copts.pidMode) - if !pidMode.Valid() { - return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode") - } - - utsMode := container.UTSMode(copts.utsMode) - if !utsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode") - } - - usernsMode := container.UsernsMode(copts.usernsMode) - if !usernsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode") - } - - restartPolicy, err := ParseRestartPolicy(copts.restartPolicy) - if err != nil { - return nil, nil, nil, err - } - - loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Healthcheck - var healthConfig *container.HealthConfig - haveHealthSettings := copts.healthCmd != "" || - copts.healthInterval != 0 || - copts.healthTimeout != 0 || - copts.healthRetries != 0 - if copts.noHealthcheck { - if haveHealthSettings { - return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options") - } - test := strslice.StrSlice{"NONE"} - healthConfig = &container.HealthConfig{Test: test} - } else if haveHealthSettings { - var probe strslice.StrSlice - if copts.healthCmd != "" { - args := []string{"CMD-SHELL", copts.healthCmd} - probe = strslice.StrSlice(args) - } - if copts.healthInterval < 0 { - return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative") - } - if copts.healthTimeout < 0 { - return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative") - } - - healthConfig = &container.HealthConfig{ - Test: probe, - Interval: copts.healthInterval, - Timeout: copts.healthTimeout, - Retries: copts.healthRetries, - } - } - - resources := container.Resources{ - CgroupParent: copts.cgroupParent, - Memory: memory, - MemoryReservation: memoryReservation, - MemorySwap: memorySwap, - MemorySwappiness: &copts.swappiness, - KernelMemory: kernelMemory, - OomKillDisable: &copts.oomKillDisable, - NanoCPUs: copts.cpus.Value(), - CPUCount: copts.cpuCount, - CPUPercent: copts.cpuPercent, - CPUShares: copts.cpuShares, - CPUPeriod: copts.cpuPeriod, - CpusetCpus: copts.cpusetCpus, - CpusetMems: copts.cpusetMems, - CPUQuota: copts.cpuQuota, - CPURealtimePeriod: copts.cpuRealtimePeriod, - CPURealtimeRuntime: copts.cpuRealtimeRuntime, - PidsLimit: copts.pidsLimit, - BlkioWeight: copts.blkioWeight, - BlkioWeightDevice: copts.blkioWeightDevice.GetList(), - BlkioDeviceReadBps: copts.deviceReadBps.GetList(), - BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), - BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), - BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), - IOMaximumIOps: copts.ioMaxIOps, - IOMaximumBandwidth: uint64(maxIOBandwidth), - Ulimits: copts.ulimits.GetList(), - Devices: deviceMappings, - } - - config := &container.Config{ - Hostname: copts.hostname, - ExposedPorts: ports, - User: copts.user, - Tty: copts.tty, - // TODO: deprecated, it comes from -n, --networking - // it's still needed internally to set the network to disabled - // if e.g. 
bridge is none in daemon opts, and in inspect - NetworkDisabled: false, - OpenStdin: copts.stdin, - AttachStdin: attachStdin, - AttachStdout: attachStdout, - AttachStderr: attachStderr, - Env: envVariables, - Cmd: runCmd, - Image: copts.Image, - Volumes: volumes, - MacAddress: copts.macAddress, - Entrypoint: entrypoint, - WorkingDir: copts.workingDir, - Labels: ConvertKVStringsToMap(labels), - Healthcheck: healthConfig, - } - if flags.Changed("stop-signal") { - config.StopSignal = copts.stopSignal - } - if flags.Changed("stop-timeout") { - config.StopTimeout = &copts.stopTimeout - } - - hostConfig := &container.HostConfig{ - Binds: binds, - ContainerIDFile: copts.containerIDFile, - OomScoreAdj: copts.oomScoreAdj, - AutoRemove: copts.autoRemove, - Privileged: copts.privileged, - PortBindings: portBindings, - Links: copts.links.GetAll(), - PublishAllPorts: copts.publishAll, - // Make sure the dns fields are never nil. - // New containers don't ever have those fields nil, - // but pre created containers can still have those nil values. - // See https://github.com/docker/docker/pull/17779 - // for a more detailed explanation on why we don't want that. - DNS: copts.dns.GetAllOrEmpty(), - DNSSearch: copts.dnsSearch.GetAllOrEmpty(), - DNSOptions: copts.dnsOptions.GetAllOrEmpty(), - ExtraHosts: copts.extraHosts.GetAll(), - VolumesFrom: copts.volumesFrom.GetAll(), - NetworkMode: container.NetworkMode(copts.netMode), - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - UsernsMode: usernsMode, - CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), - CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), - GroupAdd: copts.groupAdd.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: securityOpts, - StorageOpt: storageOpts, - ReadonlyRootfs: copts.readonlyRootfs, - LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, - VolumeDriver: copts.volumeDriver, - Isolation: container.Isolation(copts.isolation), - ShmSize: shmSize, - Resources: resources, - Tmpfs: tmpfs, - Sysctls: copts.sysctls.GetAll(), - Runtime: copts.runtime, - } - - // only set this value if the user provided the flag, else it should default to nil - if flags.Changed("init") { - hostConfig.Init = &copts.init - } - - // When allocating stdin in attached mode, close stdin at client disconnect - if config.OpenStdin && config.AttachStdin { - config.StdinOnce = true - } - - networkingConfig := &networktypes.NetworkingConfig{ - EndpointsConfig: make(map[string]*networktypes.EndpointSettings), - } - - if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { - epConfig := &networktypes.EndpointSettings{} - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - - epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: copts.ipv4Address, - IPv6Address: copts.ipv6Address, - } - - if copts.linkLocalIPs.Len() > 0 { - epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) - copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) - } - } - - if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Links = make([]string, len(hostConfig.Links)) - copy(epConfig.Links, hostConfig.Links) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - if copts.aliases.Len() > 0 { - epConfig := 
networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Aliases = make([]string, copts.aliases.Len()) - copy(epConfig.Aliases, copts.aliases.GetAll()) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - return config, hostConfig, networkingConfig, nil -} - -// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter -func ReadKVStrings(files []string, override []string) ([]string, error) { - envVariables := []string{} - for _, ef := range files { - parsedVars, err := ParseEnvFile(ef) - if err != nil { - return nil, err - } - envVariables = append(envVariables, parsedVars...) - } - // parse the '-e' and '--env' after, to allow override - envVariables = append(envVariables, override...) - - return envVariables, nil -} - -// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} -func ConvertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} - -// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} -// but set unset keys to nil - meaning the ones with no "=" in them. -// We use this in cases where we need to distinguish between -// FOO= and FOO -// where the latter case just means FOO was mentioned but not given a value -func ConvertKVStringsToMapWithNil(values []string) map[string]*string { - result := make(map[string]*string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = nil - } else { - result[kv[0]] = &kv[1] - } - } - - return result -} - -func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { - loggingOptsMap := ConvertKVStringsToMap(loggingOpts) - if loggingDriver == "none" && len(loggingOpts) > 0 { - return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) - } - return loggingOptsMap, nil -} - -// takes a local seccomp daemon, reads the file contents for sending to the daemon -func parseSecurityOpts(securityOpts []string) ([]string, error) { - for key, opt := range securityOpts { - con := strings.SplitN(opt, "=", 2) - if len(con) == 1 && con[0] != "no-new-privileges" { - if strings.Contains(opt, ":") { - con = strings.SplitN(opt, ":", 2) - } else { - return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) - } - } - if con[0] == "seccomp" && con[1] != "unconfined" { - f, err := ioutil.ReadFile(con[1]) - if err != nil { - return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) - } - b := bytes.NewBuffer(nil) - if err := json.Compact(b, f); err != nil { - return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) - } - securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) - } - } - - return securityOpts, nil -} - -// parses storage options per container into a map -func parseStorageOpts(storageOpts []string) (map[string]string, error) { - m := make(map[string]string) - for _, option := range storageOpts { - if strings.Contains(option, "=") { - opt := strings.SplitN(option, "=", 2) - m[opt[0]] = opt[1] - } else { - return nil, fmt.Errorf("invalid storage option") - } - } - 
return m, nil -} - -// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { - p := container.RestartPolicy{} - - if policy == "" { - return p, nil - } - - parts := strings.Split(policy, ":") - - if len(parts) > 2 { - return p, fmt.Errorf("invalid restart policy format") - } - if len(parts) == 2 { - count, err := strconv.Atoi(parts[1]) - if err != nil { - return p, fmt.Errorf("maximum retry count must be an integer") - } - - p.MaximumRetryCount = count - } - - p.Name = parts[0] - - return p, nil -} - -// ParseDevice parses a device mapping string to a container.DeviceMapping struct -func ParseDevice(device string) (container.DeviceMapping, error) { - src := "" - dst := "" - permissions := "rwm" - arr := strings.Split(device, ":") - switch len(arr) { - case 3: - permissions = arr[2] - fallthrough - case 2: - if ValidDeviceMode(arr[1]) { - permissions = arr[1] - } else { - dst = arr[1] - } - fallthrough - case 1: - src = arr[0] - default: - return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) - } - - if dst == "" { - dst = src - } - - deviceMapping := container.DeviceMapping{ - PathOnHost: src, - PathInContainer: dst, - CgroupPermissions: permissions, - } - return deviceMapping, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). -func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. 
-func ValidateDevice(val string) (string, error) {
- return validatePath(val, ValidDeviceMode)
-}
-
-func validatePath(val string, validator func(string) bool) (string, error) {
- var containerPath string
- var mode string
-
- if strings.Count(val, ":") > 2 {
- return val, fmt.Errorf("bad format for path: %s", val)
- }
-
- split := strings.SplitN(val, ":", 3)
- if split[0] == "" {
- return val, fmt.Errorf("bad format for path: %s", val)
- }
- switch len(split) {
- case 1:
- containerPath = split[0]
- val = path.Clean(containerPath)
- case 2:
- if isValid := validator(split[1]); isValid {
- containerPath = split[0]
- mode = split[1]
- val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
- } else {
- containerPath = split[1]
- val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath))
- }
- case 3:
- containerPath = split[1]
- mode = split[2]
- if isValid := validator(split[2]); !isValid {
- return val, fmt.Errorf("bad mode specified: %s", mode)
- }
- val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode)
- }
-
- if !path.IsAbs(containerPath) {
- return val, fmt.Errorf("%s is not an absolute path", containerPath)
- }
- return val, nil
-}
-
-// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon.
-// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped).
-// In Windows, a drive letter appears in two situations:
-// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option)
-// b. A string in the format like `\\?\C:\Windows\...` (UNC).
-// Therefore, a drive letter can only follow either a `:` or `\\`.
-// This allows strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo` to be split correctly.
-func volumeSplitN(raw string, n int) []string {
- var array []string
- if len(raw) == 0 || raw[0] == ':' {
- // invalid
- return nil
- }
- // numberOfParts counts the number of parts separated by a separator colon
- numberOfParts := 0
- // left represents the left-most cursor in raw, updated at every `:` character considered as a separator.
- left := 0
- // right represents the right-most cursor in raw incremented with the loop. Note this
- // starts at index 1 as index 0 is already handled above as a special case.
- for right := 1; right < len(raw); right++ {
- // stop parsing if reached maximum number of parts
- if n >= 0 && numberOfParts >= n {
- break
- }
- if raw[right] != ':' {
- continue
- }
- potentialDriveLetter := raw[right-1]
- if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') {
- if right > 1 {
- beforePotentialDriveLetter := raw[right-2]
- // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`)
- if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' {
- // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`.
- array = append(array, raw[left:right])
- left = right + 1
- numberOfParts++
- }
- // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing.
- }
- // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing.
- } else {
- // if `:` is not preceded by a potential drive letter, then consider it as a delimiter.
- array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - } - // need to take care of the last part - if left < len(raw) { - if n >= 0 && numberOfParts >= n { - // if the maximum number of parts is reached, just append the rest to the last part - // left-1 is at the last `:` that needs to be included since not considered a separator. - array[n-1] += raw[left-1:] - } else { - array = append(array, raw[left:]) - } - } - return array -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/runtime.go b/vendor/github.com/docker/docker/runconfig/opts/runtime.go deleted file mode 100644 index 4361b3ce0..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/runtime.go +++ /dev/null @@ -1,79 +0,0 @@ -package opts - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" -) - -// RuntimeOpt defines a map of Runtimes -type RuntimeOpt struct { - name string - stockRuntimeName string - values *map[string]types.Runtime -} - -// NewNamedRuntimeOpt creates a new RuntimeOpt -func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { - if ref == nil { - ref = &map[string]types.Runtime{} - } - return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *RuntimeOpt) Name() string { - return o.name -} - -// Set validates and updates the list of Runtimes -func (o *RuntimeOpt) Set(val string) error { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.ToLower(parts[0]) - if parts[0] == o.stockRuntimeName { - return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) - } - - if _, ok := (*o.values)[parts[0]]; ok { - return fmt.Errorf("runtime '%s' was already defined", parts[0]) - } - - (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} - - return nil -} - -// String returns Runtime values as a string. -func (o *RuntimeOpt) String() string { - var out []string - for k := range *o.values { - out = append(out, k) - } - - return fmt.Sprintf("%v", out) -} - -// GetMap returns a map of Runtimes (name: path) -func (o *RuntimeOpt) GetMap() map[string]types.Runtime { - if o.values != nil { - return *o.values - } - - return map[string]types.Runtime{} -} - -// Type returns the type of the option -func (o *RuntimeOpt) Type() string { - return "runtime" -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go deleted file mode 100644 index 502432429..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go +++ /dev/null @@ -1,111 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. 
-func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
- split := strings.SplitN(val, ":", 2)
- if len(split) != 2 {
- return nil, fmt.Errorf("bad format: %s", val)
- }
- if !strings.HasPrefix(split[0], "/dev/") {
- return nil, fmt.Errorf("bad format for device path: %s", val)
- }
- rate, err := units.RAMInBytes(split[1])
- if err != nil {
- return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
- }
- if rate < 0 {
- return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
- }
-
- return &blkiodev.ThrottleDevice{
- Path: split[0],
- Rate: uint64(rate),
- }, nil
-}
-
-// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format.
-func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
- split := strings.SplitN(val, ":", 2)
- if len(split) != 2 {
- return nil, fmt.Errorf("bad format: %s", val)
- }
- if !strings.HasPrefix(split[0], "/dev/") {
- return nil, fmt.Errorf("bad format for device path: %s", val)
- }
- rate, err := strconv.ParseUint(split[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
- }
- if rate < 0 {
- return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
- }
-
- return &blkiodev.ThrottleDevice{
- Path: split[0],
- Rate: uint64(rate),
- }, nil
-}
-
-// ThrottledeviceOpt defines a map of ThrottleDevices
-type ThrottledeviceOpt struct {
- values []*blkiodev.ThrottleDevice
- validator ValidatorThrottleFctType
-}
-
-// NewThrottledeviceOpt creates a new ThrottledeviceOpt
-func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt {
- values := []*blkiodev.ThrottleDevice{}
- return ThrottledeviceOpt{
- values: values,
- validator: validator,
- }
-}
-
-// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt
-func (opt *ThrottledeviceOpt) Set(val string) error {
- var value *blkiodev.ThrottleDevice
- if opt.validator != nil {
- v, err := opt.validator(val)
- if err != nil {
- return err
- }
- value = v
- }
- (opt.values) = append((opt.values), value)
- return nil
-}
-
-// String returns ThrottledeviceOpt values as a string.
-func (opt *ThrottledeviceOpt) String() string {
- var out []string
- for _, v := range opt.values {
- out = append(out, v.String())
- }
-
- return fmt.Sprintf("%v", out)
-}
-
-// GetList returns a slice of pointers to ThrottleDevices.
-func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
- var throttledevice []*blkiodev.ThrottleDevice
- throttledevice = append(throttledevice, opt.values...)
- - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "throttled-device" -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/ulimit.go b/vendor/github.com/docker/docker/runconfig/opts/ulimit.go deleted file mode 100644 index 5adfe3085..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/ulimit.go +++ /dev/null @@ -1,57 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/docker/go-units" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*units.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := units.ParseUlimit(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. -func (o *UlimitOpt) GetList() []*units.Ulimit { - var ulimits []*units.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} - -// Type returns the option type -func (o *UlimitOpt) Type() string { - return "ulimit" -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go deleted file mode 100644 index 2a5da6da0..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. 
-func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. -func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. -func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - var weightdevice []*blkiodev.WeightDevice - for _, v := range opt.values { - weightdevice = append(weightdevice, v) - } - - return weightdevice -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "weighted-device" -} diff --git a/vendor/github.com/mailgun/manners/LICENSE b/vendor/github.com/mailgun/manners/LICENSE deleted file mode 100644 index 91ef5beed..000000000 --- a/vendor/github.com/mailgun/manners/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Braintree, a division of PayPal, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
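The manners vendor tree being removed here existed only to provide graceful shutdown. As of Go 1.8 the standard library covers the same need: http.Server.Shutdown stops accepting new connections and blocks until in-flight requests complete, much like GracefulServer.BlockingClose below. A minimal sketch for reference; the address, handler, and 10-second timeout are illustrative, not taken from this patch:

	package main

	import (
		"context"
		"log"
		"net/http"
		"os"
		"os/signal"
		"time"
	)

	func main() {
		srv := &http.Server{Addr: ":8080", Handler: http.DefaultServeMux}

		go func() {
			sigchan := make(chan os.Signal, 1)
			signal.Notify(sigchan, os.Interrupt)
			<-sigchan
			// Allow in-flight requests up to 10 seconds to complete.
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			defer cancel()
			srv.Shutdown(ctx)
		}()

		// http.ErrServerClosed is the expected result after a clean Shutdown.
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}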
diff --git a/vendor/github.com/mailgun/manners/interfaces.go b/vendor/github.com/mailgun/manners/interfaces.go deleted file mode 100644 index fd0732857..000000000 --- a/vendor/github.com/mailgun/manners/interfaces.go +++ /dev/null @@ -1,7 +0,0 @@ -package manners - -type waitGroup interface { - Add(int) - Done() - Wait() -} diff --git a/vendor/github.com/mailgun/manners/listener.go b/vendor/github.com/mailgun/manners/listener.go deleted file mode 100644 index 63d2db56a..000000000 --- a/vendor/github.com/mailgun/manners/listener.go +++ /dev/null @@ -1,155 +0,0 @@ -package manners - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// NewListener wraps an existing listener for use with -// GracefulServer. -// -// Note that you generally don't need to use this directly as -// GracefulServer will automatically wrap any non-graceful listeners -// supplied to it. -func NewListener(l net.Listener) *GracefulListener { - return &GracefulListener{ - listener: l, - mutex: &sync.RWMutex{}, - open: true, - } -} - -// A GracefulListener differs from a standard net.Listener in one way: if -// Accept() is called after it is gracefully closed, it returns a -// listenerAlreadyClosed error. The GracefulServer will ignore this error. -type GracefulListener struct { - listener net.Listener - open bool - mutex *sync.RWMutex -} - -func (l *GracefulListener) isClosed() bool { - l.mutex.RLock() - defer l.mutex.RUnlock() - return !l.open -} - -func (l *GracefulListener) Addr() net.Addr { - return l.listener.Addr() -} - -// Accept implements the Accept method in the Listener interface. -func (l *GracefulListener) Accept() (net.Conn, error) { - conn, err := l.listener.Accept() - if err != nil { - if l.isClosed() { - err = fmt.Errorf("listener already closed: err=(%s)", err) - } - return nil, err - } - return conn, nil -} - -// Close tells the wrapped listener to stop listening. It is idempotent. -func (l *GracefulListener) Close() error { - l.mutex.Lock() - defer l.mutex.Unlock() - if !l.open { - return nil - } - l.open = false - return l.listener.Close() -} - -func (l *GracefulListener) GetFile() (*os.File, error) { - return getListenerFile(l.listener) -} - -func (l *GracefulListener) Clone() (net.Listener, error) { - l.mutex.Lock() - defer l.mutex.Unlock() - - if !l.open { - return nil, errors.New("listener is already closed") - } - - file, err := l.GetFile() - if err != nil { - return nil, err - } - defer file.Close() - - fl, err := net.FileListener(file) - if nil != err { - return nil, err - } - return fl, nil -} - -// A listener implements a network listener (net.Listener) for TLS connections. -// direct lift from crypto/tls.go -type TLSListener struct { - net.Listener - config *tls.Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection c is a *tls.Conn. -func (l *TLSListener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return - } - c = tls.Server(c, l.config) - return -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must have -// at least one certificate. -func NewTLSListener(inner net.Listener, config *tls.Config) net.Listener { - l := new(TLSListener) - l.Listener = inner - l.config = config - return l -} - -// TCPKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. 
It's used by ListenAndServe and ListenAndServeTLS so
-// dead TCP connections (e.g. closing laptop mid-download) eventually
-// go away.
-//
-// direct lift from net/http/server.go
-type TCPKeepAliveListener struct {
- *net.TCPListener
-}
-
-func (ln TCPKeepAliveListener) Accept() (c net.Conn, err error) {
- tc, err := ln.AcceptTCP()
- if err != nil {
- return
- }
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
- return tc, nil
-}
-
-func getListenerFile(listener net.Listener) (*os.File, error) {
- switch t := listener.(type) {
- case *net.TCPListener:
- return t.File()
- case *net.UnixListener:
- return t.File()
- case TCPKeepAliveListener:
- return t.TCPListener.File()
- case *TLSListener:
- return getListenerFile(t.Listener)
- }
- return nil, fmt.Errorf("Unsupported listener: %T", listener)
-}
diff --git a/vendor/github.com/mailgun/manners/server.go b/vendor/github.com/mailgun/manners/server.go
deleted file mode 100644
index e6ba85002..000000000
--- a/vendor/github.com/mailgun/manners/server.go
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
-Package manners provides a wrapper for a standard net/http server that
-ensures all active HTTP clients have completed their current request
-before the server shuts down.
-
-It can be used as a drop-in replacement for the standard http package,
-or can wrap a pre-configured Server.
-
-eg.
-
- http.Handle("/hello", func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("Hello\n"))
- })
-
- log.Fatal(manners.ListenAndServe(":8080", nil))
-
-or for a customized server:
-
- s := manners.NewWithServer(&http.Server{
- Addr: ":8080",
- Handler: myHandler,
- ReadTimeout: 10 * time.Second,
- WriteTimeout: 10 * time.Second,
- MaxHeaderBytes: 1 << 20,
- })
- log.Fatal(s.ListenAndServe())
-
-The server will shut down cleanly when the Close() method is called:
-
- go func() {
- sigchan := make(chan os.Signal, 1)
- signal.Notify(sigchan, os.Interrupt, os.Kill)
- <-sigchan
- log.Info("Shutting down...")
- manners.Close()
- }()
-
- http.Handle("/hello", myHandler)
- log.Fatal(manners.ListenAndServe(":8080", nil))
-*/
-package manners
-
-import (
- "crypto/tls"
- "net"
- "net/http"
- "os"
- "sync"
- "sync/atomic"
- "unsafe"
-)
-
-// StateHandler can be called by the server if the state of the connection changes.
-// Notice that it is passed the previous state and the new state as parameters.
-type StateHandler func(net.Conn, http.ConnState, http.ConnState)
-
-// Options used by NewWithOptions to provide parameters for a GracefulServer
-// instance to be created.
-type Options struct {
- Server *http.Server
- StateHandler StateHandler
- Listener net.Listener
-}
-
-// A GracefulServer maintains a WaitGroup that counts how many in-flight
-// requests the server is handling. When it receives a shutdown signal,
-// it stops accepting new requests but does not actually shut down until
-// all in-flight requests terminate.
-//
-// GracefulServer embeds the underlying net/http.Server making its non-override
-// methods and properties available.
-//
-// It must be initialized by calling NewServer or NewWithServer
-type GracefulServer struct {
- *http.Server
- shutdown chan bool
- shutdownFinished chan bool
- wg waitGroup
- routinesCount int32
- listener *GracefulListener
- stateHandler StateHandler
- connToProps map[net.Conn]connProperties
- connToPropsMtx sync.RWMutex
-
- up chan net.Listener // Only used by test code.
-}
-
-// NewServer creates a new GracefulServer.
-func NewServer() *GracefulServer {
- return NewWithOptions(Options{})
-}
-
-// NewWithServer wraps an existing http.Server object and returns a
-// GracefulServer that supports all of the original Server operations.
-func NewWithServer(s *http.Server) *GracefulServer {
- return NewWithOptions(Options{Server: s})
-}
-
-// NewWithOptions creates a GracefulServer instance with the specified options.
-func NewWithOptions(o Options) *GracefulServer {
- var listener *GracefulListener
- if o.Listener != nil {
- g, ok := o.Listener.(*GracefulListener)
- if !ok {
- listener = NewListener(o.Listener)
- } else {
- listener = g
- }
- }
- if o.Server == nil {
- o.Server = new(http.Server)
- }
- return &GracefulServer{
- Server: o.Server,
- listener: listener,
- stateHandler: o.StateHandler,
- shutdown: make(chan bool),
- shutdownFinished: make(chan bool, 1),
- wg: new(sync.WaitGroup),
- connToProps: make(map[net.Conn]connProperties),
- }
-}
-
-// Close stops the server from accepting new requests and begins shutting down.
-// It returns true if it's the first time Close is called.
-func (gs *GracefulServer) Close() bool {
- return <-gs.shutdown
-}
-
-// BlockingClose is similar to Close, except that it blocks until the last
-// connection has been closed.
-func (gs *GracefulServer) BlockingClose() bool {
- result := gs.Close()
- <-gs.shutdownFinished
- return result
-}
-
-// ListenAndServe provides a graceful equivalent of net/http.Server.ListenAndServe.
-func (gs *GracefulServer) ListenAndServe() error {
- if gs.listener == nil {
- oldListener, err := net.Listen("tcp", gs.Addr)
- if err != nil {
- return err
- }
- gs.listener = NewListener(oldListener.(*net.TCPListener))
- }
- return gs.Serve(gs.listener)
-}
-
-// ListenAndServeTLS provides a graceful equivalent of net/http.Server.ListenAndServeTLS.
-func (gs *GracefulServer) ListenAndServeTLS(certFile, keyFile string) error {
- // direct lift from net/http/server.go
- addr := gs.Addr
- if addr == "" {
- addr = ":https"
- }
- config := &tls.Config{}
- if gs.TLSConfig != nil {
- *config = *gs.TLSConfig
- }
- if config.NextProtos == nil {
- config.NextProtos = []string{"http/1.1"}
- }
-
- var err error
- config.Certificates = make([]tls.Certificate, 1)
- config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return err
- }
-
- return gs.ListenAndServeTLSWithConfig(config)
-}
-
-// ListenAndServeTLSWithConfig provides a graceful equivalent of net/http.Server.ListenAndServeTLS.
-func (gs *GracefulServer) ListenAndServeTLSWithConfig(config *tls.Config) error {
- addr := gs.Addr
- if addr == "" {
- addr = ":https"
- }
-
- if gs.listener == nil {
- ln, err := net.Listen("tcp", addr)
- if err != nil {
- return err
- }
-
- tlsListener := NewTLSListener(TCPKeepAliveListener{ln.(*net.TCPListener)}, config)
- gs.listener = NewListener(tlsListener)
- }
- return gs.Serve(gs.listener)
-}
-
-func (gs *GracefulServer) GetFile() (*os.File, error) {
- return gs.listener.GetFile()
-}
-
-func (gs *GracefulServer) HijackListener(s *http.Server, config *tls.Config) (*GracefulServer, error) {
- listenerUnsafePtr := unsafe.Pointer(gs.listener)
- listener, err := (*GracefulListener)(atomic.LoadPointer(&listenerUnsafePtr)).Clone()
- if err != nil {
- return nil, err
- }
-
- if config != nil {
- listener = NewTLSListener(TCPKeepAliveListener{listener.(*net.TCPListener)}, config)
- }
-
- other := NewWithServer(s)
- other.listener = NewListener(listener)
- return other, nil
-}
-
-// Serve provides a graceful equivalent of net/http.Server.Serve.
-// -// If listener is not an instance of *GracefulListener it will be wrapped -// to become one. -func (gs *GracefulServer) Serve(listener net.Listener) error { - // Accept a net.Listener to preserve the interface compatibility with the - // standard http.Server. If it is not a GracefulListener then wrap it into - // one. - gracefulListener, ok := listener.(*GracefulListener) - if !ok { - gracefulListener = NewListener(listener) - listener = gracefulListener - } - listenerUnsafePtr := unsafe.Pointer(gs.listener) - atomic.StorePointer(&listenerUnsafePtr, unsafe.Pointer(gracefulListener)) - - // Wrap the server HTTP handler into graceful one, that will close kept - // alive connections if a new request is received after shutdown. - gracefulHandler := newGracefulHandler(gs.Server.Handler) - gs.Server.Handler = gracefulHandler - - // Start a goroutine that waits for a shutdown signal and will stop the - // listener when it receives the signal. That in turn will result in - // unblocking of the http.Serve call. - go func() { - gs.shutdown <- true - close(gs.shutdown) - gracefulHandler.Close() - gs.Server.SetKeepAlivesEnabled(false) - gracefulListener.Close() - }() - - originalConnState := gs.Server.ConnState - - // s.ConnState is invoked by the net/http.Server every time a connection - // changes state. It keeps track of each connection's state over time, - // enabling manners to handle persisted connections correctly. - gs.ConnState = func(conn net.Conn, newState http.ConnState) { - gs.connToPropsMtx.RLock() - connProps := gs.connToProps[conn] - gs.connToPropsMtx.RUnlock() - - switch newState { - - case http.StateNew: - // New connection -> StateNew - connProps.protected = true - gs.StartRoutine() - - case http.StateActive: - // (StateNew, StateIdle) -> StateActive - if gracefulHandler.IsClosed() { - conn.Close() - break - } - - if !connProps.protected { - connProps.protected = true - gs.StartRoutine() - } - - default: - // (StateNew, StateActive) -> (StateIdle, StateClosed, StateHiJacked) - if connProps.protected { - gs.FinishRoutine() - connProps.protected = false - } - } - - if gs.stateHandler != nil { - gs.stateHandler(conn, connProps.state, newState) - } - connProps.state = newState - - gs.connToPropsMtx.Lock() - if newState == http.StateClosed || newState == http.StateHijacked { - delete(gs.connToProps, conn) - } else { - gs.connToProps[conn] = connProps - } - gs.connToPropsMtx.Unlock() - - if originalConnState != nil { - originalConnState(conn, newState) - } - } - - // A hook to allow the server to notify others when it is ready to receive - // requests; only used by tests. - if gs.up != nil { - gs.up <- listener - } - - err := gs.Server.Serve(listener) - // An error returned on shutdown is not worth reporting. - if err != nil && gracefulHandler.IsClosed() { - err = nil - } - - // Wait for pending requests to complete regardless the Serve result. - gs.wg.Wait() - gs.shutdownFinished <- true - return err -} - -// StartRoutine increments the server's WaitGroup. Use this if a web request -// starts more goroutines and these goroutines are not guaranteed to finish -// before the request. -func (gs *GracefulServer) StartRoutine() { - gs.wg.Add(1) - atomic.AddInt32(&gs.routinesCount, 1) -} - -// FinishRoutine decrements the server's WaitGroup. Use this to complement -// StartRoutine(). 
-func (gs *GracefulServer) FinishRoutine() { - gs.wg.Done() - atomic.AddInt32(&gs.routinesCount, -1) -} - -// RoutinesCount returns the number of currently running routines -func (gs *GracefulServer) RoutinesCount() int { - return int(atomic.LoadInt32(&gs.routinesCount)) -} - -// gracefulHandler is used by GracefulServer to prevent calling ServeHTTP on -// to be closed kept-alive connections during the server shutdown. -type gracefulHandler struct { - closed int32 // accessed atomically. - wrapped http.Handler -} - -func newGracefulHandler(wrapped http.Handler) *gracefulHandler { - return &gracefulHandler{ - wrapped: wrapped, - } -} - -func (gh *gracefulHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if atomic.LoadInt32(&gh.closed) == 0 { - gh.wrapped.ServeHTTP(w, r) - return - } - r.Body.Close() - // Server is shutting down at this moment, and the connection that this - // handler is being called on is about to be closed. So we do not need to - // actually execute the handler logic. -} - -func (gh *gracefulHandler) Close() { - atomic.StoreInt32(&gh.closed, 1) -} - -func (gh *gracefulHandler) IsClosed() bool { - return atomic.LoadInt32(&gh.closed) == 1 -} - -// connProperties is used to keep track of various properties of connections -// maintained by a gracefulServer. -type connProperties struct { - // Last known connection state. - state http.ConnState - // Tells whether the connection is going to defer server shutdown until - // the current HTTP request is completed. - protected bool -} diff --git a/vendor/github.com/mailgun/manners/static.go b/vendor/github.com/mailgun/manners/static.go deleted file mode 100644 index b53950675..000000000 --- a/vendor/github.com/mailgun/manners/static.go +++ /dev/null @@ -1,47 +0,0 @@ -package manners - -import ( - "net" - "net/http" - "sync" -) - -var ( - defaultServer *GracefulServer - defaultServerLock = &sync.Mutex{} -) - -func init() { - defaultServerLock.Lock() -} - -// ListenAndServe provides a graceful version of the function provided by the -// net/http package. Call Close() to stop the server. -func ListenAndServe(addr string, handler http.Handler) error { - defaultServer = NewWithServer(&http.Server{Addr: addr, Handler: handler}) - defaultServerLock.Unlock() - return defaultServer.ListenAndServe() -} - -// ListenAndServeTLS provides a graceful version of the function provided by the -// net/http package. Call Close() to stop the server. -func ListenAndServeTLS(addr string, certFile string, keyFile string, handler http.Handler) error { - defaultServer = NewWithServer(&http.Server{Addr: addr, Handler: handler}) - defaultServerLock.Unlock() - return defaultServer.ListenAndServeTLS(certFile, keyFile) -} - -// Serve provides a graceful version of the function provided by the net/http -// package. Call Close() to stop the server. -func Serve(l net.Listener, handler http.Handler) error { - defaultServer = NewWithServer(&http.Server{Handler: handler}) - defaultServerLock.Unlock() - return defaultServer.Serve(l) -} - -// Shuts down the default server used by ListenAndServe, ListenAndServeTLS and -// Serve. It returns true if it's the first time Close is called. 
-func Close() bool { - defaultServerLock.Lock() - return defaultServer.Close() -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go deleted file mode 100644 index e0f3ca165..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go +++ /dev/null @@ -1,61 +0,0 @@ -package configs - -import "fmt" - -// blockIODevice holds major:minor format supported in blkio cgroup -type blockIODevice struct { - // Major is the device's major number - Major int64 `json:"major"` - // Minor is the device's minor number - Minor int64 `json:"minor"` -} - -// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair -type WeightDevice struct { - blockIODevice - // Weight is the bandwidth rate for the device, range is from 10 to 1000 - Weight uint16 `json:"weight"` - // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only - LeafWeight uint16 `json:"leafWeight"` -} - -// NewWeightDevice returns a configured WeightDevice pointer -func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice { - wd := &WeightDevice{} - wd.Major = major - wd.Minor = minor - wd.Weight = weight - wd.LeafWeight = leafWeight - return wd -} - -// WeightString formats the struct to be writable to the cgroup specific file -func (wd *WeightDevice) WeightString() string { - return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight) -} - -// LeafWeightString formats the struct to be writable to the cgroup specific file -func (wd *WeightDevice) LeafWeightString() string { - return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight) -} - -// ThrottleDevice struct holds a `major:minor rate_per_second` pair -type ThrottleDevice struct { - blockIODevice - // Rate is the IO rate limit per cgroup per device - Rate uint64 `json:"rate"` -} - -// NewThrottleDevice returns a configured ThrottleDevice pointer -func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice { - td := &ThrottleDevice{} - td.Major = major - td.Minor = minor - td.Rate = rate - return td -} - -// String formats the struct to be writable to the cgroup specific file -func (td *ThrottleDevice) String() string { - return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go deleted file mode 100644 index bd6f69b82..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build linux freebsd - -package configs - -type FreezerState string - -const ( - Undefined FreezerState = "" - Frozen FreezerState = "FROZEN" - Thawed FreezerState = "THAWED" -) - -type Cgroup struct { - // Deprecated, use Path instead - Name string `json:"name,omitempty"` - - // name of parent of cgroup or slice - // Deprecated, use Path instead - Parent string `json:"parent,omitempty"` - - // Path specifies the path to cgroups that are created and/or joined by the container. - // The path is assumed to be relative to the host system cgroup mountpoint. - Path string `json:"path"` - - // ScopePrefix decribes prefix for the scope name - ScopePrefix string `json:"scope_prefix"` - - // Paths represent the absolute cgroups paths to join. - // This takes precedence over Path. 
- Paths map[string]string
-
- // Resources contains various cgroups settings to apply
- *Resources
-}
-
-type Resources struct {
- // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
- // Deprecated
- AllowAllDevices *bool `json:"allow_all_devices,omitempty"`
- // Deprecated
- AllowedDevices []*Device `json:"allowed_devices,omitempty"`
- // Deprecated
- DeniedDevices []*Device `json:"denied_devices,omitempty"`
-
- Devices []*Device `json:"devices"`
-
- // Memory limit (in bytes)
- Memory int64 `json:"memory"`
-
- // Memory reservation or soft_limit (in bytes)
- MemoryReservation int64 `json:"memory_reservation"`
-
- // Total memory usage (memory + swap); set `-1` to enable unlimited swap
- MemorySwap int64 `json:"memory_swap"`
-
- // Kernel memory limit (in bytes)
- KernelMemory int64 `json:"kernel_memory"`
-
- // Kernel memory limit for TCP use (in bytes)
- KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
-
- // CPU shares (relative weight vs. other containers)
- CpuShares int64 `json:"cpu_shares"`
-
- // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
- CpuQuota int64 `json:"cpu_quota"`
-
- // CPU period to be used for hardcapping (in usecs). 0 to use system default.
- CpuPeriod int64 `json:"cpu_period"`
-
- // How much CPU time can be used in realtime scheduling (in usecs).
- CpuRtRuntime int64 `json:"cpu_rt_quota"`
-
- // CPU period to be used for realtime scheduling (in usecs).
- CpuRtPeriod int64 `json:"cpu_rt_period"`
-
- // CPU to use
- CpusetCpus string `json:"cpuset_cpus"`
-
- // MEM to use
- CpusetMems string `json:"cpuset_mems"`
-
- // Process limit; set <= `0' to disable limit.
- PidsLimit int64 `json:"pids_limit"`
-
- // Specifies per cgroup weight, range is from 10 to 1000.
- BlkioWeight uint16 `json:"blkio_weight"`
-
- // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
- BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
-
- // Weight per cgroup per device, can override BlkioWeight.
- BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
-
- // IO read rate limit per cgroup per device, bytes per second.
- BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
-
- // IO write rate limit per cgroup per device, bytes per second.
- BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
-
- // IO read rate limit per cgroup per device, IO per second.
- BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
-
- // IO write rate limit per cgroup per device, IO per second.
- BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"` - - // set the freeze value for the process - Freezer FreezerState `json:"freezer"` - - // Hugetlb limit (in bytes) - HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"` - - // Whether to disable OOM Killer - OomKillDisable bool `json:"oom_kill_disable"` - - // Tuning swappiness behaviour per cgroup - MemorySwappiness *int64 `json:"memory_swappiness"` - - // Set priority of network traffic for container - NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"` - - // Set class identifier for container's network packets - NetClsClassid uint32 `json:"net_cls_classid"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go deleted file mode 100644 index 95e2830a4..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows,!linux,!freebsd - -package configs - -type Cgroup struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go deleted file mode 100644 index d74847b0d..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package configs - -// TODO Windows: This can ultimately be entirely factored out on Windows as -// cgroups are a Unix-specific construct. -type Cgroup struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go deleted file mode 100644 index 3c38191b3..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ /dev/null @@ -1,328 +0,0 @@ -package configs - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "time" - - "github.com/Sirupsen/logrus" -) - -type Rlimit struct { - Type int `json:"type"` - Hard uint64 `json:"hard"` - Soft uint64 `json:"soft"` -} - -// IDMap represents UID/GID Mappings for User Namespaces. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -// Seccomp represents syscall restrictions -// By default, only the native architecture of the kernel is allowed to be used -// for syscalls. Additional architectures can be added by specifying them in -// Architectures. -type Seccomp struct { - DefaultAction Action `json:"default_action"` - Architectures []string `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Action is taken upon rule match in Seccomp -type Action int - -const ( - Kill Action = iota + 1 - Errno - Trap - Allow - Trace -) - -// Operator is a comparison operator to be used when matching syscall arguments in Seccomp -type Operator int - -const ( - EqualTo Operator = iota + 1 - NotEqualTo - GreaterThan - GreaterThanOrEqualTo - LessThan - LessThanOrEqualTo - MaskEqualTo -) - -// Arg is a rule to match a specific syscall argument in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"value_two"` - Op Operator `json:"op"` -} - -// Syscall is a rule to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` -} - -// TODO Windows. 
Many of these fields should be factored out into those parts
-// which are common across platforms, and those which are platform specific.
-
-// Config defines configuration options for executing a process inside a contained environment.
-type Config struct {
- // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
- // This is a common option when the container is running in ramdisk
- NoPivotRoot bool `json:"no_pivot_root"`
-
- // ParentDeathSignal specifies the signal that is sent to the container's process in the case
- // that the parent process dies.
- ParentDeathSignal int `json:"parent_death_signal"`
-
- // PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set.
- // When a custom PivotDir is not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable.
- // This is required when using read only root filesystems. In these cases, a read/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot.
- PivotDir string `json:"pivot_dir"`
-
- // Path to a directory containing the container's root filesystem.
- Rootfs string `json:"rootfs"`
-
- // Readonlyfs will remount the container's rootfs as readonly where only externally mounted
- // bind mounts are writable.
- Readonlyfs bool `json:"readonlyfs"`
-
- // Specifies the mount propagation flags to be applied to /.
- RootPropagation int `json:"rootPropagation"`
-
- // Mounts specify additional source and destination paths that will be mounted inside the container's
- // rootfs and mount namespace if specified
- Mounts []*Mount `json:"mounts"`
-
- // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
- Devices []*Device `json:"devices"`
-
- MountLabel string `json:"mount_label"`
-
- // Hostname optionally sets the container's hostname if provided
- Hostname string `json:"hostname"`
-
- // Namespaces specifies the container's namespaces that it should setup when cloning the init process
- // If a namespace is not provided that namespace is shared from the container's parent process
- Namespaces Namespaces `json:"namespaces"`
-
- // Capabilities specify the capabilities to keep when executing the process inside the container
- // All capabilities not specified will be dropped from the process's capability mask
- Capabilities []string `json:"capabilities"`
-
- // Networks specifies the container's network setup to be created
- Networks []*Network `json:"networks"`
-
- // Routes can be specified to create entries in the route table as the container is started
- Routes []*Route `json:"routes"`
-
- // Cgroups specifies specific cgroup settings for the various subsystems that the container is
- // placed into to limit the resources the container has available
- Cgroups *Cgroup `json:"cgroups"`
-
- // AppArmorProfile specifies the profile to apply to the process running in the container and is
- // changed at the time the process is exec'd
- AppArmorProfile string `json:"apparmor_profile,omitempty"`
-
- // ProcessLabel specifies the label to apply to the process running in the container. It is
- // commonly used by selinux
- ProcessLabel string `json:"process_label,omitempty"`
-
- // Rlimits specifies the resource limits, such as max open files, to set in the container
- // If Rlimits are not set, the container will inherit rlimits from the parent process
- Rlimits []Rlimit `json:"rlimits,omitempty"`
-
- // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
- // for a process. Valid values are between the range [-1000, 1000], where processes with
- // higher scores are preferred for being killed.
- // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
- OomScoreAdj int `json:"oom_score_adj"`
-
- // UidMappings is an array of User ID mappings for User Namespaces
- UidMappings []IDMap `json:"uid_mappings"`
-
- // GidMappings is an array of Group ID mappings for User Namespaces
- GidMappings []IDMap `json:"gid_mappings"`
-
- // MaskPaths specifies paths within the container's rootfs to mask over with a bind
- // mount pointing to /dev/null as to prevent reads of the file.
- MaskPaths []string `json:"mask_paths"`
-
- // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
- // so that these files prevent any writes.
- ReadonlyPaths []string `json:"readonly_paths"`
-
- // Sysctl is a map of properties and their values. It is the equivalent of using
- // sysctl -w my.property.name value in Linux.
- Sysctl map[string]string `json:"sysctl"`
-
- // Seccomp allows actions to be taken whenever a syscall is made within the container.
- // A number of rules are given, each having an action to be taken if a syscall matches it.
- // A default action to be taken if no rules match is also given.
- Seccomp *Seccomp `json:"seccomp"`
-
- // NoNewPrivileges controls whether processes in the container can gain additional privileges.
- NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
-
- // Hooks are a collection of actions to perform at various container lifecycle events.
- // CommandHooks are serialized to JSON, but other hooks are not.
- Hooks *Hooks
-
- // Version is the version of opencontainer specification that is supported.
- Version string `json:"version"`
-
- // Labels are user defined metadata that is stored in the config and populated on the state
- Labels []string `json:"labels"`
-
- // NoNewKeyring will not allocate a new session keyring for the container. It will use the
- // caller's keyring in this case.
- NoNewKeyring bool `json:"no_new_keyring"`
-}
-
-type Hooks struct {
- // Prestart commands are executed after the container namespaces are created,
- // but before the user supplied command is executed from init.
- Prestart []Hook
-
- // Poststart commands are executed after the container init process starts.
- Poststart []Hook
-
- // Poststop commands are executed after the container init process exits.
- Poststop []Hook -} - -func (hooks *Hooks) UnmarshalJSON(b []byte) error { - var state struct { - Prestart []CommandHook - Poststart []CommandHook - Poststop []CommandHook - } - - if err := json.Unmarshal(b, &state); err != nil { - return err - } - - deserialize := func(shooks []CommandHook) (hooks []Hook) { - for _, shook := range shooks { - hooks = append(hooks, shook) - } - - return hooks - } - - hooks.Prestart = deserialize(state.Prestart) - hooks.Poststart = deserialize(state.Poststart) - hooks.Poststop = deserialize(state.Poststop) - return nil -} - -func (hooks Hooks) MarshalJSON() ([]byte, error) { - serialize := func(hooks []Hook) (serializableHooks []CommandHook) { - for _, hook := range hooks { - switch chook := hook.(type) { - case CommandHook: - serializableHooks = append(serializableHooks, chook) - default: - logrus.Warnf("cannot serialize hook of type %T, skipping", hook) - } - } - - return serializableHooks - } - - return json.Marshal(map[string]interface{}{ - "prestart": serialize(hooks.Prestart), - "poststart": serialize(hooks.Poststart), - "poststop": serialize(hooks.Poststop), - }) -} - -// HookState is the payload provided to a hook on execution. -type HookState struct { - Version string `json:"ociVersion"` - ID string `json:"id"` - Pid int `json:"pid"` - Root string `json:"root"` - BundlePath string `json:"bundlePath"` -} - -type Hook interface { - // Run executes the hook with the provided state. - Run(HookState) error -} - -// NewFunctionHook will call the provided function when the hook is run. -func NewFunctionHook(f func(HookState) error) FuncHook { - return FuncHook{ - run: f, - } -} - -type FuncHook struct { - run func(HookState) error -} - -func (f FuncHook) Run(s HookState) error { - return f.run(s) -} - -type Command struct { - Path string `json:"path"` - Args []string `json:"args"` - Env []string `json:"env"` - Dir string `json:"dir"` - Timeout *time.Duration `json:"timeout"` -} - -// NewCommandHook will execute the provided command when the hook is run. -func NewCommandHook(cmd Command) CommandHook { - return CommandHook{ - Command: cmd, - } -} - -type CommandHook struct { - Command -} - -func (c Command) Run(s HookState) error { - b, err := json.Marshal(s) - if err != nil { - return err - } - cmd := exec.Cmd{ - Path: c.Path, - Args: c.Args, - Env: c.Env, - Stdin: bytes.NewReader(b), - } - errC := make(chan error, 1) - go func() { - out, err := cmd.CombinedOutput() - if err != nil { - err = fmt.Errorf("%s: %s", err, out) - } - errC <- err - }() - if c.Timeout != nil { - select { - case err := <-errC: - return err - case <-time.After(*c.Timeout): - cmd.Process.Kill() - cmd.Wait() - return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) - } - } - return <-errC -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go deleted file mode 100644 index a60554a7b..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build freebsd linux - -package configs - -import "fmt" - -// HostUID gets the root uid for the process on host which could be non-zero -// when user namespaces are enabled. 
-func (c Config) HostUID() (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if c.UidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no user mappings found.") - } - id, found := c.hostIDFromMapping(0, c.UidMappings) - if !found { - return -1, fmt.Errorf("User namespaces enabled, but no root user mapping found.") - } - return id, nil - } - // Return default root uid 0 - return 0, nil -} - -// HostGID gets the root gid for the process on host which could be non-zero -// when user namespaces are enabled. -func (c Config) HostGID() (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if c.GidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.") - } - id, found := c.hostIDFromMapping(0, c.GidMappings) - if !found { - return -1, fmt.Errorf("User namespaces enabled, but no root group mapping found.") - } - return id, nil - } - // Return default root gid 0 - return 0, nil -} - -// Utility function that gets a host ID for a container ID from user namespace map -// if that ID is present in the map. -func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) { - for _, m := range uMap { - if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (containerID - m.ContainerID) - return hostID, true - } - } - return -1, false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go deleted file mode 100644 index 8701bb212..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go +++ /dev/null @@ -1,57 +0,0 @@ -package configs - -import ( - "fmt" - "os" -) - -const ( - Wildcard = -1 -) - -// TODO Windows: This can be factored out in the future - -type Device struct { - // Device type, block, char, etc. - Type rune `json:"type"` - - // Path to the device. - Path string `json:"path"` - - // Major is the device's major number. - Major int64 `json:"major"` - - // Minor is the device's minor number. - Minor int64 `json:"minor"` - - // Cgroup permissions format, rwm. - Permissions string `json:"permissions"` - - // FileMode permission bits for the device. - FileMode os.FileMode `json:"file_mode"` - - // Uid of the device. - Uid uint32 `json:"uid"` - - // Gid of the device. - Gid uint32 `json:"gid"` - - // Write the file to the allowed list - Allow bool `json:"allow"` -} - -func (d *Device) CgroupString() string { - return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions) -} - -func (d *Device) Mkdev() int { - return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12)) -} - -// deviceNumberString converts the device number to a string return result. -func deviceNumberString(number int64) string { - if number == Wildcard { - return "*" - } - return fmt.Sprint(number) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go deleted file mode 100644 index ba1f437f3..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build linux freebsd - -package configs - -var ( - // DefaultSimpleDevices are devices that are to be both allowed and created. 
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
deleted file mode 100644
index ba1f437f3..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// +build linux freebsd
-
-package configs
-
-var (
-    // DefaultSimpleDevices are devices that are to be both allowed and created.
-    DefaultSimpleDevices = []*Device{
-        // /dev/null and zero
-        {
-            Path:        "/dev/null",
-            Type:        'c',
-            Major:       1,
-            Minor:       3,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-        {
-            Path:        "/dev/zero",
-            Type:        'c',
-            Major:       1,
-            Minor:       5,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-
-        {
-            Path:        "/dev/full",
-            Type:        'c',
-            Major:       1,
-            Minor:       7,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-
-        // consoles and ttys
-        {
-            Path:        "/dev/tty",
-            Type:        'c',
-            Major:       5,
-            Minor:       0,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-
-        // /dev/urandom,/dev/random
-        {
-            Path:        "/dev/urandom",
-            Type:        'c',
-            Major:       1,
-            Minor:       9,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-        {
-            Path:        "/dev/random",
-            Type:        'c',
-            Major:       1,
-            Minor:       8,
-            Permissions: "rwm",
-            FileMode:    0666,
-        },
-    }
-    DefaultAllowedDevices = append([]*Device{
-        // allow mknod for any device
-        {
-            Type:        'c',
-            Major:       Wildcard,
-            Minor:       Wildcard,
-            Permissions: "m",
-        },
-        {
-            Type:        'b',
-            Major:       Wildcard,
-            Minor:       Wildcard,
-            Permissions: "m",
-        },
-
-        {
-            Path:        "/dev/console",
-            Type:        'c',
-            Major:       5,
-            Minor:       1,
-            Permissions: "rwm",
-        },
-        // /dev/pts/ - pts namespaces are "coming soon"
-        {
-            Path:        "",
-            Type:        'c',
-            Major:       136,
-            Minor:       Wildcard,
-            Permissions: "rwm",
-        },
-        {
-            Path:        "",
-            Type:        'c',
-            Major:       5,
-            Minor:       2,
-            Permissions: "rwm",
-        },
-
-        // tuntap
-        {
-            Path:        "",
-            Type:        'c',
-            Major:       10,
-            Minor:       200,
-            Permissions: "rwm",
-        },
-    }, DefaultSimpleDevices...)
-    DefaultAutoCreatedDevices = append([]*Device{
-        {
-            // /dev/fuse is created but not allowed.
-            // This is to allow java to work. Because java
-            // Insists on there being a /dev/fuse
-            // https://github.com/docker/docker/issues/514
-            // https://github.com/docker/docker/issues/2393
-            //
-            Path:        "/dev/fuse",
-            Type:        'c',
-            Major:       10,
-            Minor:       229,
-            Permissions: "rwm",
-        },
-    }, DefaultSimpleDevices...)
-)
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
deleted file mode 100644
index d30216380..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package configs
-
-type HugepageLimit struct {
-    // which type of hugepage to limit.
-    Pagesize string `json:"page_size"`
-
-    // usage limit for hugepage.
-    Limit uint64 `json:"limit"`
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
deleted file mode 100644
index 9a0395eaf..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package configs
-
-import (
-    "fmt"
-)
-
-type IfPrioMap struct {
-    Interface string `json:"interface"`
-    Priority  int64  `json:"priority"`
-}
-
-func (i *IfPrioMap) CgroupString() string {
-    return fmt.Sprintf("%s %d", i.Interface, i.Priority)
-}
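CgroupString, seen in the files above, produces the whitespace-separated lines the cgroup-v1 devices controller expects in devices.allow. A self-contained sketch of that formatting, using the same wildcard convention:

package main

import "fmt"

const wildcard = -1

// cgroupDeviceLine reproduces the formatting done by Device.CgroupString
// above: "<type> <major>:<minor> <permissions>", with "*" for wildcards.
func cgroupDeviceLine(typ rune, major, minor int64, perms string) string {
    num := func(n int64) string {
        if n == wildcard {
            return "*"
        }
        return fmt.Sprint(n)
    }
    return fmt.Sprintf("%c %s:%s %s", typ, num(major), num(minor), perms)
}

func main() {
    fmt.Println(cgroupDeviceLine('c', 1, 3, "rwm"))             // "c 1:3 rwm" -> /dev/null
    fmt.Println(cgroupDeviceLine('c', wildcard, wildcard, "m")) // "c *:* m"  -> mknod any char device
}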
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
deleted file mode 100644
index cc770c916..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package configs
-
-type Mount struct {
-    // Source path for the mount.
-    Source string `json:"source"`
-
-    // Destination path for the mount inside the container.
-    Destination string `json:"destination"`
-
-    // Device the mount is for.
-    Device string `json:"device"`
-
-    // Mount flags.
-    Flags int `json:"flags"`
-
-    // Propagation Flags
-    PropagationFlags []int `json:"propagation_flags"`
-
-    // Mount data applied to the mount.
-    Data string `json:"data"`
-
-    // Relabel source if set, "z" indicates shared, "Z" indicates unshared.
-    Relabel string `json:"relabel"`
-
-    // Optional Command to be run before Source is mounted.
-    PremountCmds []Command `json:"premount_cmds"`
-
-    // Optional Command to be run after Source is mounted.
-    PostmountCmds []Command `json:"postmount_cmds"`
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
deleted file mode 100644
index a3329a31a..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package configs
-
-type NamespaceType string
-
-type Namespaces []Namespace
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
deleted file mode 100644
index fb4b85222..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build linux
-
-package configs
-
-import "syscall"
-
-func (n *Namespace) Syscall() int {
-    return namespaceInfo[n.Type]
-}
-
-var namespaceInfo = map[NamespaceType]int{
-    NEWNET:  syscall.CLONE_NEWNET,
-    NEWNS:   syscall.CLONE_NEWNS,
-    NEWUSER: syscall.CLONE_NEWUSER,
-    NEWIPC:  syscall.CLONE_NEWIPC,
-    NEWUTS:  syscall.CLONE_NEWUTS,
-    NEWPID:  syscall.CLONE_NEWPID,
-}
-
-// CloneFlags parses the container's Namespaces options to set the correct
-// flags on clone, unshare. This function returns flags only for new namespaces.
-func (n *Namespaces) CloneFlags() uintptr {
-    var flag int
-    for _, v := range *n {
-        if v.Path != "" {
-            continue
-        }
-        flag |= namespaceInfo[v.Type]
-    }
-    return uintptr(flag)
-}
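CloneFlags above simply ORs one CLONE_* bit per namespace that has no pre-existing path to join (a non-empty path means "join via setns" instead). A Linux-only sketch of that accumulation; the namespace list and the /proc path in it are hypothetical:

// +build linux

package main

import (
    "fmt"
    "syscall"
)

func main() {
    namespaces := []struct {
        typ  string
        path string // non-empty means "join an existing namespace", so no clone flag
    }{
        {"NEWNET", ""},
        {"NEWPID", ""},
        {"NEWNS", "/proc/1234/ns/mnt"}, // hypothetical path, skipped below
    }
    flagFor := map[string]uintptr{
        "NEWNET": syscall.CLONE_NEWNET,
        "NEWPID": syscall.CLONE_NEWPID,
        "NEWNS":  syscall.CLONE_NEWNS,
    }
    var flags uintptr
    for _, ns := range namespaces {
        if ns.path != "" {
            continue
        }
        flags |= flagFor[ns.typ]
    }
    fmt.Printf("clone flags: %#x\n", flags)
}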
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
deleted file mode 100644
index 0547223a9..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !linux,!windows
-
-package configs
-
-func (n *Namespace) Syscall() int {
-    panic("No namespace syscall support")
-    return 0
-}
-
-// CloneFlags parses the container's Namespaces options to set the correct
-// flags on clone, unshare. This function returns flags only for new namespaces.
-func (n *Namespaces) CloneFlags() uintptr {
-    panic("No namespace syscall support")
-    return uintptr(0)
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
deleted file mode 100644
index b9c820d06..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// +build linux freebsd
-
-package configs
-
-import (
-    "fmt"
-    "os"
-    "sync"
-)
-
-const (
-    NEWNET  NamespaceType = "NEWNET"
-    NEWPID  NamespaceType = "NEWPID"
-    NEWNS   NamespaceType = "NEWNS"
-    NEWUTS  NamespaceType = "NEWUTS"
-    NEWIPC  NamespaceType = "NEWIPC"
-    NEWUSER NamespaceType = "NEWUSER"
-)
-
-var (
-    nsLock              sync.Mutex
-    supportedNamespaces = make(map[NamespaceType]bool)
-)
-
-// nsToFile converts the namespace type to its filename
-func nsToFile(ns NamespaceType) string {
-    switch ns {
-    case NEWNET:
-        return "net"
-    case NEWNS:
-        return "mnt"
-    case NEWPID:
-        return "pid"
-    case NEWIPC:
-        return "ipc"
-    case NEWUSER:
-        return "user"
-    case NEWUTS:
-        return "uts"
-    }
-    return ""
-}
-
-// IsNamespaceSupported returns whether a namespace is available or
-// not
-func IsNamespaceSupported(ns NamespaceType) bool {
-    nsLock.Lock()
-    defer nsLock.Unlock()
-    supported, ok := supportedNamespaces[ns]
-    if ok {
-        return supported
-    }
-    nsFile := nsToFile(ns)
-    // if the namespace type is unknown, just return false
-    if nsFile == "" {
-        return false
-    }
-    _, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile))
-    // a namespace is supported if it exists and we have permissions to read it
-    supported = err == nil
-    supportedNamespaces[ns] = supported
-    return supported
-}
-
-func NamespaceTypes() []NamespaceType {
-    return []NamespaceType{
-        NEWNET,
-        NEWPID,
-        NEWNS,
-        NEWUTS,
-        NEWIPC,
-        NEWUSER,
-    }
-}
-
-// Namespace defines configuration for each namespace. It specifies an
-// alternate path that is able to be joined via setns.
-type Namespace struct {
-    Type NamespaceType `json:"type"`
-    Path string        `json:"path"`
-}
-
-func (n *Namespace) GetPath(pid int) string {
-    if n.Path != "" {
-        return n.Path
-    }
-    return fmt.Sprintf("/proc/%d/ns/%s", pid, nsToFile(n.Type))
-}
-
-func (n *Namespaces) Remove(t NamespaceType) bool {
-    i := n.index(t)
-    if i == -1 {
-        return false
-    }
-    *n = append((*n)[:i], (*n)[i+1:]...)
-    return true
-}
-
-func (n *Namespaces) Add(t NamespaceType, path string) {
-    i := n.index(t)
-    if i == -1 {
-        *n = append(*n, Namespace{Type: t, Path: path})
-        return
-    }
-    (*n)[i].Path = path
-}
-
-func (n *Namespaces) index(t NamespaceType) int {
-    for i, ns := range *n {
-        if ns.Type == t {
-            return i
-        }
-    }
-    return -1
-}
-
-func (n *Namespaces) Contains(t NamespaceType) bool {
-    return n.index(t) != -1
-}
-
-func (n *Namespaces) PathOf(t NamespaceType) string {
-    i := n.index(t)
-    if i == -1 {
-        return ""
-    }
-    return (*n)[i].Path
-}
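IsNamespaceSupported above boils down to stat'ing /proc/self/ns/<file>: the entry exists and is readable only when the kernel offers that namespace. A minimal standalone probe along the same lines (Linux-specific; elsewhere every probe simply reports false):

package main

import (
    "fmt"
    "os"
)

// nsSupported mirrors the probe in IsNamespaceSupported above: a namespace
// counts as available when its /proc/self/ns entry can be stat'ed.
func nsSupported(nsFile string) bool {
    _, err := os.Stat("/proc/self/ns/" + nsFile)
    return err == nil
}

func main() {
    // Filenames as produced by nsToFile above.
    for _, f := range []string{"net", "mnt", "pid", "ipc", "user", "uts"} {
        fmt.Printf("%-4s supported: %v\n", f, nsSupported(f))
    }
}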
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
deleted file mode 100644
index 9a74033ce..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !linux,!freebsd
-
-package configs
-
-// Namespace defines configuration for each namespace. It specifies an
-// alternate path that is able to be joined via setns.
-type Namespace struct {
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
deleted file mode 100644
index ccdb228e1..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package configs
-
-// Network defines configuration for a container's networking stack
-//
-// The network configuration can be omitted from a container causing the
-// container to be setup with the host's networking stack
-type Network struct {
-    // Type sets the networks type, commonly veth and loopback
-    Type string `json:"type"`
-
-    // Name of the network interface
-    Name string `json:"name"`
-
-    // The bridge to use.
-    Bridge string `json:"bridge"`
-
-    // MacAddress contains the MAC address to set on the network interface
-    MacAddress string `json:"mac_address"`
-
-    // Address contains the IPv4 and mask to set on the network interface
-    Address string `json:"address"`
-
-    // Gateway sets the gateway address that is used as the default for the interface
-    Gateway string `json:"gateway"`
-
-    // IPv6Address contains the IPv6 and mask to set on the network interface
-    IPv6Address string `json:"ipv6_address"`
-
-    // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
-    IPv6Gateway string `json:"ipv6_gateway"`
-
-    // Mtu sets the mtu value for the interface and will be mirrored on both the host and
-    // container's interfaces if a pair is created, specifically in the case of type veth
-    // Note: This does not apply to loopback interfaces.
-    Mtu int `json:"mtu"`
-
-    // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and
-    // container's interfaces if a pair is created, specifically in the case of type veth
-    // Note: This does not apply to loopback interfaces.
-    TxQueueLen int `json:"txqueuelen"`
-
-    // HostInterfaceName is a unique name of a veth pair that resides on in the host interface of the
-    // container.
-    HostInterfaceName string `json:"host_interface_name"`
-
-    // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
-    // bridge port in the case of type veth
-    // Note: This is unsupported on some systems.
-    // Note: This does not apply to loopback interfaces.
-    HairpinMode bool `json:"hairpin_mode"`
-}
-
-// Routes can be specified to create entries in the route table as the container is started
-//
-// All of destination, source, and gateway should be either IPv4 or IPv6.
-// One of the three options must be present, and omitted entries will use their
-// IP family default for the route table. For IPv4 for example, setting the
-// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
-// destination of 0.0.0.0(or *) when viewed in the route table.
-type Route struct {
-    // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
-    Destination string `json:"destination"`
-
-    // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
-    Source string `json:"source"`
-
-    // Sets the gateway. Accepts IPv4 and IPv6
-    Gateway string `json:"gateway"`
-
-    // The device to set this route up for, for example: eth0
-    InterfaceName string `json:"interface_name"`
-}
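Route entries like the one above are plain JSON-tagged structs, so a default route serializes straightforwardly. A small sketch with hypothetical values (the gateway and interface are the ones used in the comment above, purely for illustration):

package main

import (
    "encoding/json"
    "fmt"
)

// route mirrors the JSON shape of the Route struct above.
type route struct {
    Destination   string `json:"destination"`
    Source        string `json:"source"`
    Gateway       string `json:"gateway"`
    InterfaceName string `json:"interface_name"`
}

func main() {
    // A default route: destination 0.0.0.0/0 via a hypothetical gateway on eth0.
    r := route{Destination: "0.0.0.0/0", Gateway: "1.2.3.4", InterfaceName: "eth0"}
    b, err := json.MarshalIndent(r, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}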
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
deleted file mode 100644
index c02b73e3e..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// +build linux freebsd
-
-package devices
-
-import (
-    "errors"
-    "fmt"
-    "io/ioutil"
-    "os"
-    "path/filepath"
-    "syscall"
-
-    "github.com/opencontainers/runc/libcontainer/configs"
-)
-
-var (
-    ErrNotADevice = errors.New("not a device node")
-)
-
-// Testing dependencies
-var (
-    osLstat       = os.Lstat
-    ioutilReadDir = ioutil.ReadDir
-)
-
-// Given the path to a device and it's cgroup_permissions(which cannot be easily queried) look up the information about a linux device and return that information as a Device struct.
-func DeviceFromPath(path, permissions string) (*configs.Device, error) {
-    fileInfo, err := osLstat(path)
-    if err != nil {
-        return nil, err
-    }
-    var (
-        devType                rune
-        mode                   = fileInfo.Mode()
-        fileModePermissionBits = os.FileMode.Perm(mode)
-    )
-    switch {
-    case mode&os.ModeDevice == 0:
-        return nil, ErrNotADevice
-    case mode&os.ModeCharDevice != 0:
-        fileModePermissionBits |= syscall.S_IFCHR
-        devType = 'c'
-    default:
-        fileModePermissionBits |= syscall.S_IFBLK
-        devType = 'b'
-    }
-    stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
-    if !ok {
-        return nil, fmt.Errorf("cannot determine the device number for device %s", path)
-    }
-    devNumber := int(stat_t.Rdev)
-    return &configs.Device{
-        Type:        devType,
-        Path:        path,
-        Major:       Major(devNumber),
-        Minor:       Minor(devNumber),
-        Permissions: permissions,
-        FileMode:    fileModePermissionBits,
-        Uid:         stat_t.Uid,
-        Gid:         stat_t.Gid,
-    }, nil
-}
-
-func HostDevices() ([]*configs.Device, error) {
-    return getDevices("/dev")
-}
-
-func getDevices(path string) ([]*configs.Device, error) {
-    files, err := ioutilReadDir(path)
-    if err != nil {
-        return nil, err
-    }
-    out := []*configs.Device{}
-    for _, f := range files {
-        switch {
-        case f.IsDir():
-            switch f.Name() {
-            case "pts", "shm", "fd", "mqueue":
-                continue
-            default:
-                sub, err := getDevices(filepath.Join(path, f.Name()))
-                if err != nil {
-                    return nil, err
-                }
-
-                out = append(out, sub...)
-                continue
-            }
-        case f.Name() == "console":
-            continue
-        }
-        device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
-        if err != nil {
-            if err == ErrNotADevice {
-                continue
-            }
-            return nil, err
-        }
-        out = append(out, device)
-    }
-    return out, nil
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
deleted file mode 100644
index 1e84033da..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build windows
-
-package devices
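The classification step in DeviceFromPath above, char versus block via os.FileMode bits plus the raw Stat_t for the device number, also works as a tiny standalone program. A sketch inspecting /dev/null (Linux/FreeBSD only, since it relies on syscall.Stat_t):

// +build linux freebsd

package main

import (
    "fmt"
    "os"
    "syscall"
)

func main() {
    // Same classification logic as DeviceFromPath above, applied to /dev/null.
    fi, err := os.Lstat("/dev/null")
    if err != nil {
        panic(err)
    }
    mode := fi.Mode()
    switch {
    case mode&os.ModeDevice == 0:
        fmt.Println("not a device node")
    case mode&os.ModeCharDevice != 0:
        fmt.Println("char device")
    default:
        fmt.Println("block device")
    }
    if st, ok := fi.Sys().(*syscall.Stat_t); ok {
        fmt.Printf("rdev=%#x uid=%d gid=%d\n", st.Rdev, st.Uid, st.Gid)
    }
}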
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
deleted file mode 100644
index 885b6e5dd..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build linux freebsd
-
-package devices
-
-/*
-
-This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
-
-You can read what they are here:
-
- - http://www.makelinux.net/ldd3/chp-3-sect-2
- - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
-
-Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h> as the representation of device numbers used by go is different than the one used internally to the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
-
-*/
-
-func Major(devNumber int) int64 {
-    return int64((devNumber >> 8) & 0xfff)
-}
-
-func Minor(devNumber int) int64 {
-    return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
-}
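The bit layout in Major and Minor above (glibc-style encoding of dev_t as seen in stat(2) results) is easy to sanity-check with pure arithmetic. A runnable sketch; the 0x0103 value is just /dev/null's conventional char 1:3 number:

package main

import "fmt"

// Same bit layout as Major/Minor above.
func major(dev int) int64 { return int64((dev >> 8) & 0xfff) }
func minor(dev int) int64 { return int64((dev & 0xff) | ((dev >> 12) & 0xfff00)) }

func main() {
    dev := 0x0103 // /dev/null packs char major 1, minor 3
    fmt.Println(major(dev), minor(dev)) // 1 3
}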
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
deleted file mode 100644
index 1afc52b4b..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// +build linux
-
-package system
-
-import (
-    "bufio"
-    "fmt"
-    "os"
-    "os/exec"
-    "syscall"
-    "unsafe"
-)
-
-// If arg2 is nonzero, set the "child subreaper" attribute of the
-// calling process; if arg2 is zero, unset the attribute. When a
-// process is marked as a child subreaper, all of the children
-// that it creates, and their descendants, will be marked as
-// having a subreaper. In effect, a subreaper fulfills the role
-// of init(1) for its descendant processes. Upon termination of
-// a process that is orphaned (i.e., its immediate parent has
-// already terminated) and marked as having a subreaper, the
-// nearest still living ancestor subreaper will receive a SIGCHLD
-// signal and be able to wait(2) on the process to discover its
-// termination status.
-const PR_SET_CHILD_SUBREAPER = 36
-
-type ParentDeathSignal int
-
-func (p ParentDeathSignal) Restore() error {
-    if p == 0 {
-        return nil
-    }
-    current, err := GetParentDeathSignal()
-    if err != nil {
-        return err
-    }
-    if p == current {
-        return nil
-    }
-    return p.Set()
-}
-
-func (p ParentDeathSignal) Set() error {
-    return SetParentDeathSignal(uintptr(p))
-}
-
-func Execv(cmd string, args []string, env []string) error {
-    name, err := exec.LookPath(cmd)
-    if err != nil {
-        return err
-    }
-
-    return syscall.Exec(name, args, env)
-}
-
-func Prlimit(pid, resource int, limit syscall.Rlimit) error {
-    _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0)
-    if err != 0 {
-        return err
-    }
-    return nil
-}
-
-func SetParentDeathSignal(sig uintptr) error {
-    if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
-        return err
-    }
-    return nil
-}
-
-func GetParentDeathSignal() (ParentDeathSignal, error) {
-    var sig int
-    _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0)
-    if err != 0 {
-        return -1, err
-    }
-    return ParentDeathSignal(sig), nil
-}
-
-func SetKeepCaps() error {
-    if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 {
-        return err
-    }
-
-    return nil
-}
-
-func ClearKeepCaps() error {
-    if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 {
-        return err
-    }
-
-    return nil
-}
-
-func Setctty() error {
-    if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 {
-        return err
-    }
-    return nil
-}
-
-// RunningInUserNS detects whether we are currently running in a user namespace.
-// Copied from github.com/lxc/lxd/shared/util.go
-func RunningInUserNS() bool {
-    file, err := os.Open("/proc/self/uid_map")
-    if err != nil {
-        // This kernel-provided file only exists if user namespaces are supported
-        return false
-    }
-    defer file.Close()
-
-    buf := bufio.NewReader(file)
-    l, _, err := buf.ReadLine()
-    if err != nil {
-        return false
-    }
-
-    line := string(l)
-    var a, b, c int64
-    fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
-    /*
-     * We assume we are in the initial user namespace if we have a full
-     * range - 4294967295 uids starting at uid 0.
-     */
-    if a == 0 && b == 0 && c == 4294967295 {
-        return false
-    }
-    return true
-}
-
-// SetSubreaper sets the value i as the subreaper setting for the calling process
-func SetSubreaper(i int) error {
-    return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
-}
-
-func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
-    _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
-    if e1 != 0 {
-        err = e1
-    }
-    return
-}
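The user-namespace check above reduces to parsing one line of /proc/self/uid_map: a full identity mapping of 4294967295 uids starting at 0 means the initial namespace. That heuristic can be exercised without touching /proc at all, using sample lines:

package main

import "fmt"

// inUserNS applies the heuristic from RunningInUserNS above to one
// line of /proc/self/uid_map.
func inUserNS(uidMapLine string) bool {
    var a, b, c int64
    fmt.Sscanf(uidMapLine, "%d %d %d", &a, &b, &c)
    return !(a == 0 && b == 0 && c == 4294967295)
}

func main() {
    fmt.Println(inUserNS("0 0 4294967295")) // false: initial user namespace
    fmt.Println(inUserNS("0 100000 65536")) // true: remapped root
}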
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
deleted file mode 100644
index 37808a29f..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package system
-
-import (
-    "io/ioutil"
-    "path/filepath"
-    "strconv"
-    "strings"
-)
-
-// look in /proc to find the process start time so that we can verify
-// that this pid has started after ourself
-func GetProcessStartTime(pid int) (string, error) {
-    data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
-    if err != nil {
-        return "", err
-    }
-
-    parts := strings.Split(string(data), " ")
-    // the starttime is located at pos 22
-    // from the man page
-    //
-    //   starttime %llu (was %lu before Linux 2.6)
-    //   (22) The time the process started after system boot. In kernels before Linux 2.6, this
-    //   value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks
-    //   (divide by sysconf(_SC_CLK_TCK)).
-    return parts[22-1], nil // starts at 1
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
deleted file mode 100644
index 615ff4c82..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package system
-
-import (
-    "fmt"
-    "runtime"
-    "syscall"
-)
-
-// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
-//
-// We need different setns values for the different platforms and arch
-// We are declaring the macro here because the SETNS syscall does not exist in th stdlib
-var setNsMap = map[string]uintptr{
-    "linux/386":     346,
-    "linux/arm64":   268,
-    "linux/amd64":   308,
-    "linux/arm":     375,
-    "linux/ppc":     350,
-    "linux/ppc64":   350,
-    "linux/ppc64le": 350,
-    "linux/s390x":   339,
-}
-
-var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
-
-func SysSetns() uint32 {
-    return uint32(sysSetns)
-}
-
-func Setns(fd uintptr, flags uintptr) error {
-    ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
-    if !exists {
-        return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
-    }
-    _, _, err := syscall.RawSyscall(ns, fd, flags, 0)
-    if err != 0 {
-        return err
-    }
-    return nil
-}
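GetProcessStartTime above indexes field 22 (1-based) of /proc/<pid>/stat, hence parts[22-1]. The split can be demonstrated on a sample line; the values below are invented for illustration, and the naive space split shares the original code's assumption that the comm field contains no spaces:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // A trimmed-down /proc/<pid>/stat line with illustrative values.
    // Field 22, counted from 1, is starttime in clock ticks.
    stat := "1 (systemd) S 0 1 1 0 -1 4194560 1 2 3 4 5 6 7 8 20 0 1 0 42 0 0"
    parts := strings.Split(stat, " ")
    fmt.Println("starttime (ticks):", parts[22-1]) // "42"
}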
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
deleted file mode 100644
index c99006518..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build linux,386
-
-package system
-
-import (
-    "syscall"
-)
-
-// Setuid sets the uid of the calling thread to the specified uid.
-func Setuid(uid int) (err error) {
-    _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
-    if e1 != 0 {
-        err = e1
-    }
-    return
-}
-
-// Setgid sets the gid of the calling thread to the specified gid.
-func Setgid(gid int) (err error) {
-    _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
-    if e1 != 0 {
-        err = e1
-    }
-    return
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
deleted file mode 100644
index 0816bf828..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
-
-package system
-
-import (
-    "syscall"
-)
-
-// Setuid sets the uid of the calling thread to the specified uid.
-func Setuid(uid int) (err error) {
-    _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0)
-    if e1 != 0 {
-        err = e1
-    }
-    return
-}
-
-// Setgid sets the gid of the calling thread to the specified gid.
-func Setgid(gid int) (err error) {
-    _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0)
-    if e1 != 0 {
-        err = e1
-    }
-    return
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
deleted file mode 100644
index 3f780f312..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build linux,arm
-
-package system
-
-import (
-    "syscall"
-)
-
-// Setuid sets the uid of the calling thread to the specified uid.
-func Setuid(uid int) (err error) {
-    _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0)
-    if e1 != 0 {
-        err = e1
-    }
-    return
-}
-
-// Setgid sets the gid of the calling thread to the specified gid.
-func Setgid(gid int) (err error) {
-    _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0)
-    if e1 != 0 {
-        err = e1
-    }
-    return
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
deleted file mode 100644
index 4fba6c2b7..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build cgo,linux cgo,freebsd
-
-package system
-
-/*
-#include <unistd.h>
-#include <limits.h>
-
-int GetLongBit() {
-#ifdef _SC_LONG_BIT
-    int longbits;
-
-    longbits = sysconf(_SC_LONG_BIT);
-    if (longbits < 0) {
-        longbits = (CHAR_BIT * sizeof(long));
-    }
-    return longbits;
-#else
-    return (CHAR_BIT * sizeof(long));
-#endif
-}
-*/
-import "C"
-
-func GetClockTicks() int {
-    return int(C.sysconf(C._SC_CLK_TCK))
-}
-
-func GetLongBit() int {
-    return int(C.GetLongBit())
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
deleted file mode 100644
index d93b5d5fd..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !cgo windows
-
-package system
-
-func GetClockTicks() int {
-    // TODO figure out a better alternative for platforms where we're missing cgo
-    //
-    // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
-    // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
-    //
-    // An example of its usage can be found here.
-    // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
-
-    return 100
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
deleted file mode 100644
index e7cfd62b2..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !linux
-
-package system
-
-// RunningInUserNS is a stub for non-Linux systems
-// Always returns false
-func RunningInUserNS() bool {
-    return false
-}
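The hard-coded fallback of 100 in the non-cgo GetClockTicks above matches _SC_CLK_TCK on most Linux systems, and it is exactly the divisor needed for the starttime field read by GetProcessStartTime. A small arithmetic sketch with invented values:

package main

import "fmt"

func main() {
    const clockTicks = 100 // assumed fallback, as in GetClockTicks above
    starttimeTicks := 4242 // hypothetical value read from /proc/<pid>/stat field 22
    fmt.Printf("process started %.2fs after boot\n",
        float64(starttimeTicks)/float64(clockTicks))
}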
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
deleted file mode 100644
index 30f74dfb1..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package system
-
-import (
-    "syscall"
-    "unsafe"
-)
-
-var _zero uintptr
-
-// Returns the size of xattrs and nil error
-// Requires path, takes allocated []byte or nil as last argument
-func Llistxattr(path string, dest []byte) (size int, err error) {
-    pathBytes, err := syscall.BytePtrFromString(path)
-    if err != nil {
-        return -1, err
-    }
-    var newpathBytes unsafe.Pointer
-    if len(dest) > 0 {
-        newpathBytes = unsafe.Pointer(&dest[0])
-    } else {
-        newpathBytes = unsafe.Pointer(&_zero)
-    }
-
-    _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0)
-    size = int(_size)
-    if errno != 0 {
-        return -1, errno
-    }
-
-    return size, nil
-}
-
-// Returns a []byte slice if the xattr is set and nil otherwise
-// Requires path and its attribute as arguments
-func Lgetxattr(path string, attr string) ([]byte, error) {
-    var sz int
-    pathBytes, err := syscall.BytePtrFromString(path)
-    if err != nil {
-        return nil, err
-    }
-    attrBytes, err := syscall.BytePtrFromString(attr)
-    if err != nil {
-        return nil, err
-    }
-
-    // Start with a 128 length byte array
-    sz = 128
-    dest := make([]byte, sz)
-    destBytes := unsafe.Pointer(&dest[0])
-    _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
-
-    switch {
-    case errno == syscall.ENODATA:
-        return nil, errno
-    case errno == syscall.ENOTSUP:
-        return nil, errno
-    case errno == syscall.ERANGE:
-        // 128 byte array might just not be good enough,
-        // A dummy buffer is used ``uintptr(0)`` to get real size
-        // of the xattrs on disk
-        _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0)
-        sz = int(_sz)
-        if sz < 0 {
-            return nil, errno
-        }
-        dest = make([]byte, sz)
-        destBytes := unsafe.Pointer(&dest[0])
-        _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
-        if errno != 0 {
-            return nil, errno
-        }
-    case errno != 0:
-        return nil, errno
-    }
-    sz = int(_sz)
-    return dest[:sz], nil
-}
-
-func Lsetxattr(path string, attr string, data []byte, flags int) error {
-    pathBytes, err := syscall.BytePtrFromString(path)
-    if err != nil {
-        return err
-    }
-    attrBytes, err := syscall.BytePtrFromString(attr)
-    if err != nil {
-        return err
-    }
-    var dataBytes unsafe.Pointer
-    if len(data) > 0 {
-        dataBytes = unsafe.Pointer(&data[0])
-    } else {
-        dataBytes = unsafe.Pointer(&_zero)
-    }
-    _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
-    if errno != 0 {
-        return errno
-    }
-    return nil
-}
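The Lgetxattr implementation above uses a classic probe-then-retry pattern: try a small buffer, and on ERANGE ask the kernel for the real size and retry. The same pattern can be written today without hand-rolled Syscall6 calls, using golang.org/x/sys/unix (an assumed external dependency; Linux only, and "security.selinux" is just an example attribute):

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// lgetxattr shows the same grow-on-ERANGE pattern as Lgetxattr above,
// delegating the raw syscall to golang.org/x/sys/unix.
func lgetxattr(path, attr string) ([]byte, error) {
    dest := make([]byte, 128) // optimistic first guess, as in the deleted code
    for {
        sz, err := unix.Lgetxattr(path, attr, dest)
        if err == unix.ERANGE {
            // Probe with a nil buffer to learn the real size, then retry.
            sz, err = unix.Lgetxattr(path, attr, nil)
            if err != nil {
                return nil, err
            }
            dest = make([]byte, sz)
            continue
        }
        if err != nil {
            return nil, err
        }
        return dest[:sz], nil
    }
}

func main() {
    val, err := lgetxattr("/", "security.selinux")
    fmt.Println(string(val), err)
}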