commit
84e1ec6607
362 changed files with 32 additions and 38439 deletions
|
@ -88,7 +88,6 @@ You can access to a simple HTML frontend of Træfik.
|
||||||
- [Oxy](https://github.com/vulcand/oxy): an awesome proxy library made by Mailgun guys
|
- [Oxy](https://github.com/vulcand/oxy): an awesome proxy library made by Mailgun guys
|
||||||
- [Gorilla mux](https://github.com/gorilla/mux): famous request router
|
- [Gorilla mux](https://github.com/gorilla/mux): famous request router
|
||||||
- [Negroni](https://github.com/codegangsta/negroni): web middlewares made simple
|
- [Negroni](https://github.com/codegangsta/negroni): web middlewares made simple
|
||||||
- [Manners](https://github.com/mailgun/manners): graceful shutdown of http.Handler servers
|
|
||||||
- [Lego](https://github.com/xenolf/lego): the best [Let's Encrypt](https://letsencrypt.org) library in go
|
- [Lego](https://github.com/xenolf/lego): the best [Let's Encrypt](https://letsencrypt.org) library in go
|
||||||
|
|
||||||
## Test it
|
## Test it
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM golang:1.7
|
FROM golang:1.8
|
||||||
|
|
||||||
# Install a more recent version of mercurial to avoid mismatching results
|
# Install a more recent version of mercurial to avoid mismatching results
|
||||||
# between glide run on a decently updated host system and the build container.
|
# between glide run on a decently updated host system and the build container.
|
||||||
|
|
136
glide.lock
generated
136
glide.lock
generated
|
@ -1,5 +1,5 @@
|
||||||
hash: 9d7c36c335fe9106ec79cb86a3b3824c23b63d3fb3a3fb4c75bd6915f3afcdd4
|
hash: 56175c5c588abf1169ba2425ac6dd7e3e5a8cf8ab6ad3f75cad51e45f5e94e3a
|
||||||
updated: 2017-03-08T18:53:12.139107148-07:00
|
updated: 2017-03-23T22:43:06.217624505+01:00
|
||||||
imports:
|
imports:
|
||||||
- name: bitbucket.org/ww/goautoneg
|
- name: bitbucket.org/ww/goautoneg
|
||||||
version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675
|
version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675
|
||||||
|
@ -132,78 +132,12 @@ imports:
|
||||||
- name: github.com/docker/distribution
|
- name: github.com/docker/distribution
|
||||||
version: 325b0804fef3a66309d962357aac3c2ce3f4d329
|
version: 325b0804fef3a66309d962357aac3c2ce3f4d329
|
||||||
subpackages:
|
subpackages:
|
||||||
- context
|
|
||||||
- digest
|
- digest
|
||||||
- reference
|
- reference
|
||||||
- registry/api/errcode
|
|
||||||
- registry/api/v2
|
|
||||||
- registry/client
|
|
||||||
- registry/client/auth
|
|
||||||
- registry/client/auth/challenge
|
|
||||||
- registry/client/transport
|
|
||||||
- registry/storage/cache
|
|
||||||
- registry/storage/cache/memory
|
|
||||||
- uuid
|
|
||||||
- name: github.com/docker/docker
|
- name: github.com/docker/docker
|
||||||
version: 49bf474f9ed7ce7143a59d1964ff7b7fd9b52178
|
version: 49bf474f9ed7ce7143a59d1964ff7b7fd9b52178
|
||||||
subpackages:
|
subpackages:
|
||||||
- api/types
|
|
||||||
- api/types/backend
|
|
||||||
- api/types/blkiodev
|
|
||||||
- api/types/container
|
|
||||||
- api/types/filters
|
|
||||||
- api/types/mount
|
|
||||||
- api/types/network
|
|
||||||
- api/types/registry
|
|
||||||
- api/types/strslice
|
|
||||||
- api/types/swarm
|
|
||||||
- api/types/versions
|
|
||||||
- builder
|
|
||||||
- builder/dockerignore
|
|
||||||
- cliconfig
|
|
||||||
- cliconfig/configfile
|
|
||||||
- daemon/graphdriver
|
|
||||||
- image
|
|
||||||
- image/v1
|
|
||||||
- layer
|
|
||||||
- namesgenerator
|
- namesgenerator
|
||||||
- oci
|
|
||||||
- opts
|
|
||||||
- pkg/archive
|
|
||||||
- pkg/chrootarchive
|
|
||||||
- pkg/fileutils
|
|
||||||
- pkg/gitutils
|
|
||||||
- pkg/homedir
|
|
||||||
- pkg/httputils
|
|
||||||
- pkg/idtools
|
|
||||||
- pkg/ioutils
|
|
||||||
- pkg/jsonlog
|
|
||||||
- pkg/jsonmessage
|
|
||||||
- pkg/longpath
|
|
||||||
- pkg/mount
|
|
||||||
- pkg/namesgenerator
|
|
||||||
- pkg/plugingetter
|
|
||||||
- pkg/plugins
|
|
||||||
- pkg/plugins/transport
|
|
||||||
- pkg/pools
|
|
||||||
- pkg/progress
|
|
||||||
- pkg/promise
|
|
||||||
- pkg/random
|
|
||||||
- pkg/reexec
|
|
||||||
- pkg/signal
|
|
||||||
- pkg/stdcopy
|
|
||||||
- pkg/streamformatter
|
|
||||||
- pkg/stringid
|
|
||||||
- pkg/symlink
|
|
||||||
- pkg/system
|
|
||||||
- pkg/tarsum
|
|
||||||
- pkg/term
|
|
||||||
- pkg/term/windows
|
|
||||||
- pkg/urlutil
|
|
||||||
- plugin/v2
|
|
||||||
- reference
|
|
||||||
- registry
|
|
||||||
- runconfig/opts
|
|
||||||
- name: github.com/docker/engine-api
|
- name: github.com/docker/engine-api
|
||||||
version: 3d1601b9d2436a70b0dfc045a23f6503d19195df
|
version: 3d1601b9d2436a70b0dfc045a23f6503d19195df
|
||||||
subpackages:
|
subpackages:
|
||||||
|
@ -329,8 +263,6 @@ imports:
|
||||||
version: 72f9bd7c4e0c2a40055ab3d0f09654f730cce982
|
version: 72f9bd7c4e0c2a40055ab3d0f09654f730cce982
|
||||||
- name: github.com/juju/ratelimit
|
- name: github.com/juju/ratelimit
|
||||||
version: 77ed1c8a01217656d2080ad51981f6e99adaa177
|
version: 77ed1c8a01217656d2080ad51981f6e99adaa177
|
||||||
- name: github.com/mailgun/manners
|
|
||||||
version: a585afd9d65c0e05f6c003f921e71ebc05074f4f
|
|
||||||
- name: github.com/mailgun/timetools
|
- name: github.com/mailgun/timetools
|
||||||
version: fd192d755b00c968d312d23f521eb0cdc6f66bd0
|
version: fd192d755b00c968d312d23f521eb0cdc6f66bd0
|
||||||
- name: github.com/mailru/easyjson
|
- name: github.com/mailru/easyjson
|
||||||
|
@ -382,9 +314,6 @@ imports:
|
||||||
- name: github.com/opencontainers/runc
|
- name: github.com/opencontainers/runc
|
||||||
version: 1a81e9ab1f138c091fe5c86d0883f87716088527
|
version: 1a81e9ab1f138c091fe5c86d0883f87716088527
|
||||||
subpackages:
|
subpackages:
|
||||||
- libcontainer/configs
|
|
||||||
- libcontainer/devices
|
|
||||||
- libcontainer/system
|
|
||||||
- libcontainer/user
|
- libcontainer/user
|
||||||
- name: github.com/ovh/go-ovh
|
- name: github.com/ovh/go-ovh
|
||||||
version: a8a4c0bc40e56322142649bda7b2b4bb15145b6e
|
version: a8a4c0bc40e56322142649bda7b2b4bb15145b6e
|
||||||
|
@ -709,63 +638,4 @@ imports:
|
||||||
- 1.5/tools/clientcmd/api
|
- 1.5/tools/clientcmd/api
|
||||||
- 1.5/tools/metrics
|
- 1.5/tools/metrics
|
||||||
- 1.5/transport
|
- 1.5/transport
|
||||||
testImports:
|
testImports: []
|
||||||
- name: github.com/Azure/go-ansiterm
|
|
||||||
version: fa152c58bc15761d0200cb75fe958b89a9d4888e
|
|
||||||
subpackages:
|
|
||||||
- winterm
|
|
||||||
- name: github.com/cloudfoundry-incubator/candiedyaml
|
|
||||||
version: 99c3df83b51532e3615f851d8c2dbb638f5313bf
|
|
||||||
- name: github.com/docker/libcompose
|
|
||||||
version: d1876c1d68527a49c0aac22a0b161acc7296b740
|
|
||||||
subpackages:
|
|
||||||
- config
|
|
||||||
- docker
|
|
||||||
- docker/builder
|
|
||||||
- docker/client
|
|
||||||
- docker/network
|
|
||||||
- labels
|
|
||||||
- logger
|
|
||||||
- lookup
|
|
||||||
- project
|
|
||||||
- project/events
|
|
||||||
- project/options
|
|
||||||
- utils
|
|
||||||
- version
|
|
||||||
- yaml
|
|
||||||
- name: github.com/flynn/go-shlex
|
|
||||||
version: 3f9db97f856818214da2e1057f8ad84803971cff
|
|
||||||
- name: github.com/go-check/check
|
|
||||||
version: 11d3bc7aa68e238947792f30573146a3231fc0f1
|
|
||||||
- name: github.com/gorilla/mux
|
|
||||||
version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf
|
|
||||||
- name: github.com/libkermit/compose
|
|
||||||
version: cadc5a3b83a15790174bd7fbc75ea2529785e772
|
|
||||||
subpackages:
|
|
||||||
- check
|
|
||||||
- name: github.com/libkermit/docker
|
|
||||||
version: 55e3595409924fcfbb850811e5a7cdbe8960a0b7
|
|
||||||
- name: github.com/libkermit/docker-check
|
|
||||||
version: cbe0ef03b3d23070eac4d00ba8828f2cc7f7e5a3
|
|
||||||
- name: github.com/opencontainers/runtime-spec
|
|
||||||
version: 06479209bdc0d4135911688c18157bd39bd99c22
|
|
||||||
subpackages:
|
|
||||||
- specs-go
|
|
||||||
- name: github.com/vbatts/tar-split
|
|
||||||
version: 6810cedb21b2c3d0b9bb8f9af12ff2dc7a2f14df
|
|
||||||
subpackages:
|
|
||||||
- archive/tar
|
|
||||||
- tar/asm
|
|
||||||
- tar/storage
|
|
||||||
- name: github.com/vdemeester/shakers
|
|
||||||
version: 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
|
|
||||||
- name: github.com/xeipuuv/gojsonpointer
|
|
||||||
version: e0fe6f68307607d540ed8eac07a342c33fa1b54a
|
|
||||||
- name: github.com/xeipuuv/gojsonreference
|
|
||||||
version: e02fc20de94c78484cd5ffb007f8af96be030a45
|
|
||||||
- name: github.com/xeipuuv/gojsonschema
|
|
||||||
version: 00f9fafb54d2244d291b86ab63d12c38bd5c3886
|
|
||||||
- name: golang.org/x/time
|
|
||||||
version: a4bde12657593d5e90d0533a3e4fd95e635124cb
|
|
||||||
subpackages:
|
|
||||||
- rate
|
|
||||||
|
|
|
@ -47,7 +47,6 @@ import:
|
||||||
- package: github.com/hashicorp/consul
|
- package: github.com/hashicorp/consul
|
||||||
subpackages:
|
subpackages:
|
||||||
- api
|
- api
|
||||||
- package: github.com/mailgun/manners
|
|
||||||
- package: github.com/streamrail/concurrent-map
|
- package: github.com/streamrail/concurrent-map
|
||||||
- package: github.com/stretchr/testify
|
- package: github.com/stretchr/testify
|
||||||
subpackages:
|
subpackages:
|
||||||
|
|
62
server.go
62
server.go
|
@ -29,13 +29,13 @@ import (
|
||||||
"github.com/containous/traefik/provider"
|
"github.com/containous/traefik/provider"
|
||||||
"github.com/containous/traefik/safe"
|
"github.com/containous/traefik/safe"
|
||||||
"github.com/containous/traefik/types"
|
"github.com/containous/traefik/types"
|
||||||
"github.com/mailgun/manners"
|
|
||||||
"github.com/streamrail/concurrent-map"
|
"github.com/streamrail/concurrent-map"
|
||||||
"github.com/vulcand/oxy/cbreaker"
|
"github.com/vulcand/oxy/cbreaker"
|
||||||
"github.com/vulcand/oxy/connlimit"
|
"github.com/vulcand/oxy/connlimit"
|
||||||
"github.com/vulcand/oxy/forward"
|
"github.com/vulcand/oxy/forward"
|
||||||
"github.com/vulcand/oxy/roundrobin"
|
"github.com/vulcand/oxy/roundrobin"
|
||||||
"github.com/vulcand/oxy/utils"
|
"github.com/vulcand/oxy/utils"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
var oxyLogger = &OxyLogger{}
|
var oxyLogger = &OxyLogger{}
|
||||||
|
@ -58,7 +58,7 @@ type Server struct {
|
||||||
type serverEntryPoints map[string]*serverEntryPoint
|
type serverEntryPoints map[string]*serverEntryPoint
|
||||||
|
|
||||||
type serverEntryPoint struct {
|
type serverEntryPoint struct {
|
||||||
httpServer *manners.GracefulServer
|
httpServer *http.Server
|
||||||
httpRouter *middlewares.HandlerSwitcher
|
httpRouter *middlewares.HandlerSwitcher
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -114,15 +114,23 @@ func (server *Server) Wait() {
|
||||||
|
|
||||||
// Stop stops the server
|
// Stop stops the server
|
||||||
func (server *Server) Stop() {
|
func (server *Server) Stop() {
|
||||||
for serverEntryPointName, serverEntryPoint := range server.serverEntryPoints {
|
defer log.Info("Server stopped")
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(server.globalConfiguration.GraceTimeOut)*time.Second)
|
var wg sync.WaitGroup
|
||||||
go func() {
|
for sepn, sep := range server.serverEntryPoints {
|
||||||
log.Debugf("Waiting %d seconds before killing connections on entrypoint %s...", 30, serverEntryPointName)
|
wg.Add(1)
|
||||||
serverEntryPoint.httpServer.BlockingClose()
|
go func(serverEntryPointName string, serverEntryPoint *serverEntryPoint) {
|
||||||
|
defer wg.Done()
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(server.globalConfiguration.GraceTimeOut)*time.Second)
|
||||||
|
log.Debugf("Waiting %d seconds before killing connections on entrypoint %s...", server.globalConfiguration.GraceTimeOut, serverEntryPointName)
|
||||||
|
if err := serverEntryPoint.httpServer.Shutdown(ctx); err != nil {
|
||||||
|
log.Debugf("Wait is over due to: %s", err)
|
||||||
|
serverEntryPoint.httpServer.Close()
|
||||||
|
}
|
||||||
cancel()
|
cancel()
|
||||||
}()
|
log.Debugf("Entrypoint %s closed", serverEntryPointName)
|
||||||
<-ctx.Done()
|
}(sepn, sep)
|
||||||
}
|
}
|
||||||
|
wg.Wait()
|
||||||
server.stopChan <- true
|
server.stopChan <- true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -191,7 +199,7 @@ func (server *Server) startHTTPServers() {
|
||||||
if server.globalConfiguration.EntryPoints[newServerEntryPointName].Compress {
|
if server.globalConfiguration.EntryPoints[newServerEntryPointName].Compress {
|
||||||
serverMiddlewares = append(serverMiddlewares, &middlewares.Compress{})
|
serverMiddlewares = append(serverMiddlewares, &middlewares.Compress{})
|
||||||
}
|
}
|
||||||
newsrv, err := server.prepareServer(newServerEntryPointName, newServerEntryPoint.httpRouter, server.globalConfiguration.EntryPoints[newServerEntryPointName], nil, serverMiddlewares...)
|
newsrv, err := server.prepareServer(newServerEntryPointName, newServerEntryPoint.httpRouter, server.globalConfiguration.EntryPoints[newServerEntryPointName], serverMiddlewares...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal("Error preparing server: ", err)
|
log.Fatal("Error preparing server: ", err)
|
||||||
}
|
}
|
||||||
|
@ -493,21 +501,20 @@ func (server *Server) createTLSConfig(entryPointName string, tlsOption *TLS, rou
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (server *Server) startServer(srv *manners.GracefulServer, globalConfiguration GlobalConfiguration) {
|
func (server *Server) startServer(srv *http.Server, globalConfiguration GlobalConfiguration) {
|
||||||
log.Infof("Starting server on %s", srv.Addr)
|
log.Infof("Starting server on %s", srv.Addr)
|
||||||
|
var err error
|
||||||
if srv.TLSConfig != nil {
|
if srv.TLSConfig != nil {
|
||||||
if err := srv.ListenAndServeTLSWithConfig(srv.TLSConfig); err != nil {
|
err = srv.ListenAndServeTLS("", "")
|
||||||
log.Fatal("Error creating server: ", err)
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
if err := srv.ListenAndServe(); err != nil {
|
err = srv.ListenAndServe()
|
||||||
log.Fatal("Error creating server: ", err)
|
}
|
||||||
}
|
if err != nil {
|
||||||
|
log.Error("Error creating server: ", err)
|
||||||
}
|
}
|
||||||
log.Info("Server stopped")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (server *Server) prepareServer(entryPointName string, router *middlewares.HandlerSwitcher, entryPoint *EntryPoint, oldServer *manners.GracefulServer, middlewares ...negroni.Handler) (*manners.GracefulServer, error) {
|
func (server *Server) prepareServer(entryPointName string, router *middlewares.HandlerSwitcher, entryPoint *EntryPoint, middlewares ...negroni.Handler) (*http.Server, error) {
|
||||||
log.Infof("Preparing server %s %+v", entryPointName, entryPoint)
|
log.Infof("Preparing server %s %+v", entryPointName, entryPoint)
|
||||||
// middlewares
|
// middlewares
|
||||||
var negroni = negroni.New()
|
var negroni = negroni.New()
|
||||||
|
@ -521,24 +528,11 @@ func (server *Server) prepareServer(entryPointName string, router *middlewares.H
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if oldServer == nil {
|
return &http.Server{
|
||||||
return manners.NewWithServer(
|
|
||||||
&http.Server{
|
|
||||||
Addr: entryPoint.Address,
|
|
||||||
Handler: negroni,
|
|
||||||
TLSConfig: tlsConfig,
|
|
||||||
}), nil
|
|
||||||
}
|
|
||||||
gracefulServer, err := oldServer.HijackListener(&http.Server{
|
|
||||||
Addr: entryPoint.Address,
|
Addr: entryPoint.Address,
|
||||||
Handler: negroni,
|
Handler: negroni,
|
||||||
TLSConfig: tlsConfig,
|
TLSConfig: tlsConfig,
|
||||||
}, tlsConfig)
|
}, nil
|
||||||
if err != nil {
|
|
||||||
log.Errorf("Error hijacking server: %s", err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return gracefulServer, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (server *Server) buildEntryPoints(globalConfiguration GlobalConfiguration) map[string]*serverEntryPoint {
|
func (server *Server) buildEntryPoints(globalConfiguration GlobalConfiguration) map[string]*serverEntryPoint {
|
||||||
|
|
85
vendor/github.com/docker/distribution/context/context.go
generated
vendored
85
vendor/github.com/docker/distribution/context/context.go
generated
vendored
|
@ -1,85 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/uuid"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Context is a copy of Context from the golang.org/x/net/context package.
|
|
||||||
type Context interface {
|
|
||||||
context.Context
|
|
||||||
}
|
|
||||||
|
|
||||||
// instanceContext is a context that provides only an instance id. It is
|
|
||||||
// provided as the main background context.
|
|
||||||
type instanceContext struct {
|
|
||||||
Context
|
|
||||||
id string // id of context, logged as "instance.id"
|
|
||||||
once sync.Once // once protect generation of the id
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ic *instanceContext) Value(key interface{}) interface{} {
|
|
||||||
if key == "instance.id" {
|
|
||||||
ic.once.Do(func() {
|
|
||||||
// We want to lazy initialize the UUID such that we don't
|
|
||||||
// call a random generator from the package initialization
|
|
||||||
// code. For various reasons random could not be available
|
|
||||||
// https://github.com/docker/distribution/issues/782
|
|
||||||
ic.id = uuid.Generate().String()
|
|
||||||
})
|
|
||||||
return ic.id
|
|
||||||
}
|
|
||||||
|
|
||||||
return ic.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
var background = &instanceContext{
|
|
||||||
Context: context.Background(),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Background returns a non-nil, empty Context. The background context
|
|
||||||
// provides a single key, "instance.id" that is globally unique to the
|
|
||||||
// process.
|
|
||||||
func Background() Context {
|
|
||||||
return background
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValue returns a copy of parent in which the value associated with key is
|
|
||||||
// val. Use context Values only for request-scoped data that transits processes
|
|
||||||
// and APIs, not for passing optional parameters to functions.
|
|
||||||
func WithValue(parent Context, key, val interface{}) Context {
|
|
||||||
return context.WithValue(parent, key, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringMapContext is a simple context implementation that checks a map for a
|
|
||||||
// key, falling back to a parent if not present.
|
|
||||||
type stringMapContext struct {
|
|
||||||
context.Context
|
|
||||||
m map[string]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValues returns a context that proxies lookups through a map. Only
|
|
||||||
// supports string keys.
|
|
||||||
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
|
|
||||||
mo := make(map[string]interface{}, len(m)) // make our own copy.
|
|
||||||
for k, v := range m {
|
|
||||||
mo[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
return stringMapContext{
|
|
||||||
Context: ctx,
|
|
||||||
m: mo,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (smc stringMapContext) Value(key interface{}) interface{} {
|
|
||||||
if ks, ok := key.(string); ok {
|
|
||||||
if v, ok := smc.m[ks]; ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return smc.Context.Value(key)
|
|
||||||
}
|
|
89
vendor/github.com/docker/distribution/context/doc.go
generated
vendored
89
vendor/github.com/docker/distribution/context/doc.go
generated
vendored
|
@ -1,89 +0,0 @@
|
||||||
// Package context provides several utilities for working with
|
|
||||||
// golang.org/x/net/context in http requests. Primarily, the focus is on
|
|
||||||
// logging relevant request information but this package is not limited to
|
|
||||||
// that purpose.
|
|
||||||
//
|
|
||||||
// The easiest way to get started is to get the background context:
|
|
||||||
//
|
|
||||||
// ctx := context.Background()
|
|
||||||
//
|
|
||||||
// The returned context should be passed around your application and be the
|
|
||||||
// root of all other context instances. If the application has a version, this
|
|
||||||
// line should be called before anything else:
|
|
||||||
//
|
|
||||||
// ctx := context.WithVersion(context.Background(), version)
|
|
||||||
//
|
|
||||||
// The above will store the version in the context and will be available to
|
|
||||||
// the logger.
|
|
||||||
//
|
|
||||||
// Logging
|
|
||||||
//
|
|
||||||
// The most useful aspect of this package is GetLogger. This function takes
|
|
||||||
// any context.Context interface and returns the current logger from the
|
|
||||||
// context. Canonical usage looks like this:
|
|
||||||
//
|
|
||||||
// GetLogger(ctx).Infof("something interesting happened")
|
|
||||||
//
|
|
||||||
// GetLogger also takes optional key arguments. The keys will be looked up in
|
|
||||||
// the context and reported with the logger. The following example would
|
|
||||||
// return a logger that prints the version with each log message:
|
|
||||||
//
|
|
||||||
// ctx := context.Context(context.Background(), "version", version)
|
|
||||||
// GetLogger(ctx, "version").Infof("this log message has a version field")
|
|
||||||
//
|
|
||||||
// The above would print out a log message like this:
|
|
||||||
//
|
|
||||||
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
|
|
||||||
//
|
|
||||||
// When used with WithLogger, we gain the ability to decorate the context with
|
|
||||||
// loggers that have information from disparate parts of the call stack.
|
|
||||||
// Following from the version example, we can build a new context with the
|
|
||||||
// configured logger such that we always print the version field:
|
|
||||||
//
|
|
||||||
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
|
|
||||||
//
|
|
||||||
// Since the logger has been pushed to the context, we can now get the version
|
|
||||||
// field for free with our log messages. Future calls to GetLogger on the new
|
|
||||||
// context will have the version field:
|
|
||||||
//
|
|
||||||
// GetLogger(ctx).Infof("this log message has a version field")
|
|
||||||
//
|
|
||||||
// This becomes more powerful when we start stacking loggers. Let's say we
|
|
||||||
// have the version logger from above but also want a request id. Using the
|
|
||||||
// context above, in our request scoped function, we place another logger in
|
|
||||||
// the context:
|
|
||||||
//
|
|
||||||
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
|
|
||||||
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
|
|
||||||
//
|
|
||||||
// When GetLogger is called on the new context, "http.request.id" will be
|
|
||||||
// included as a logger field, along with the original "version" field:
|
|
||||||
//
|
|
||||||
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
|
|
||||||
//
|
|
||||||
// Note that this only affects the new context, the previous context, with the
|
|
||||||
// version field, can be used independently. Put another way, the new logger,
|
|
||||||
// added to the request context, is unique to that context and can have
|
|
||||||
// request scoped varaibles.
|
|
||||||
//
|
|
||||||
// HTTP Requests
|
|
||||||
//
|
|
||||||
// This package also contains several methods for working with http requests.
|
|
||||||
// The concepts are very similar to those described above. We simply place the
|
|
||||||
// request in the context using WithRequest. This makes the request variables
|
|
||||||
// available. GetRequestLogger can then be called to get request specific
|
|
||||||
// variables in a log line:
|
|
||||||
//
|
|
||||||
// ctx = WithRequest(ctx, req)
|
|
||||||
// GetRequestLogger(ctx).Infof("request variables")
|
|
||||||
//
|
|
||||||
// Like above, if we want to include the request data in all log messages in
|
|
||||||
// the context, we push the logger to a new context and use that one:
|
|
||||||
//
|
|
||||||
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
|
|
||||||
//
|
|
||||||
// The concept is fairly powerful and ensures that calls throughout the stack
|
|
||||||
// can be traced in log messages. Using the fields like "http.request.id", one
|
|
||||||
// can analyze call flow for a particular request with a simple grep of the
|
|
||||||
// logs.
|
|
||||||
package context
|
|
366
vendor/github.com/docker/distribution/context/http.go
generated
vendored
366
vendor/github.com/docker/distribution/context/http.go
generated
vendored
|
@ -1,366 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/distribution/uuid"
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Common errors used with this package.
|
|
||||||
var (
|
|
||||||
ErrNoRequestContext = errors.New("no http request in context")
|
|
||||||
ErrNoResponseWriterContext = errors.New("no http response in context")
|
|
||||||
)
|
|
||||||
|
|
||||||
func parseIP(ipStr string) net.IP {
|
|
||||||
ip := net.ParseIP(ipStr)
|
|
||||||
if ip == nil {
|
|
||||||
log.Warnf("invalid remote IP address: %q", ipStr)
|
|
||||||
}
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoteAddr extracts the remote address of the request, taking into
|
|
||||||
// account proxy headers.
|
|
||||||
func RemoteAddr(r *http.Request) string {
|
|
||||||
if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
|
|
||||||
proxies := strings.Split(prior, ",")
|
|
||||||
if len(proxies) > 0 {
|
|
||||||
remoteAddr := strings.Trim(proxies[0], " ")
|
|
||||||
if parseIP(remoteAddr) != nil {
|
|
||||||
return remoteAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// X-Real-Ip is less supported, but worth checking in the
|
|
||||||
// absence of X-Forwarded-For
|
|
||||||
if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
|
|
||||||
if parseIP(realIP) != nil {
|
|
||||||
return realIP
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.RemoteAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoteIP extracts the remote IP of the request, taking into
|
|
||||||
// account proxy headers.
|
|
||||||
func RemoteIP(r *http.Request) string {
|
|
||||||
addr := RemoteAddr(r)
|
|
||||||
|
|
||||||
// Try parsing it as "IP:port"
|
|
||||||
if ip, _, err := net.SplitHostPort(addr); err == nil {
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
|
|
||||||
return addr
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRequest places the request on the context. The context of the request
|
|
||||||
// is assigned a unique id, available at "http.request.id". The request itself
|
|
||||||
// is available at "http.request". Other common attributes are available under
|
|
||||||
// the prefix "http.request.". If a request is already present on the context,
|
|
||||||
// this method will panic.
|
|
||||||
func WithRequest(ctx Context, r *http.Request) Context {
|
|
||||||
if ctx.Value("http.request") != nil {
|
|
||||||
// NOTE(stevvooe): This needs to be considered a programming error. It
|
|
||||||
// is unlikely that we'd want to have more than one request in
|
|
||||||
// context.
|
|
||||||
panic("only one request per context")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &httpRequestContext{
|
|
||||||
Context: ctx,
|
|
||||||
startedAt: time.Now(),
|
|
||||||
id: uuid.Generate().String(),
|
|
||||||
r: r,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRequest returns the http request in the given context. Returns
|
|
||||||
// ErrNoRequestContext if the context does not have an http request associated
|
|
||||||
// with it.
|
|
||||||
func GetRequest(ctx Context) (*http.Request, error) {
|
|
||||||
if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
return nil, ErrNoRequestContext
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRequestID attempts to resolve the current request id, if possible. An
|
|
||||||
// error is return if it is not available on the context.
|
|
||||||
func GetRequestID(ctx Context) string {
|
|
||||||
return GetStringValue(ctx, "http.request.id")
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithResponseWriter returns a new context and response writer that makes
|
|
||||||
// interesting response statistics available within the context.
|
|
||||||
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
|
|
||||||
if closeNotifier, ok := w.(http.CloseNotifier); ok {
|
|
||||||
irwCN := &instrumentedResponseWriterCN{
|
|
||||||
instrumentedResponseWriter: instrumentedResponseWriter{
|
|
||||||
ResponseWriter: w,
|
|
||||||
Context: ctx,
|
|
||||||
},
|
|
||||||
CloseNotifier: closeNotifier,
|
|
||||||
}
|
|
||||||
|
|
||||||
return irwCN, irwCN
|
|
||||||
}
|
|
||||||
|
|
||||||
irw := instrumentedResponseWriter{
|
|
||||||
ResponseWriter: w,
|
|
||||||
Context: ctx,
|
|
||||||
}
|
|
||||||
return &irw, &irw
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetResponseWriter returns the http.ResponseWriter from the provided
|
|
||||||
// context. If not present, ErrNoResponseWriterContext is returned. The
|
|
||||||
// returned instance provides instrumentation in the context.
|
|
||||||
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
|
|
||||||
v := ctx.Value("http.response")
|
|
||||||
|
|
||||||
rw, ok := v.(http.ResponseWriter)
|
|
||||||
if !ok || rw == nil {
|
|
||||||
return nil, ErrNoResponseWriterContext
|
|
||||||
}
|
|
||||||
|
|
||||||
return rw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getVarsFromRequest let's us change request vars implementation for testing
|
|
||||||
// and maybe future changes.
|
|
||||||
var getVarsFromRequest = mux.Vars
|
|
||||||
|
|
||||||
// WithVars extracts gorilla/mux vars and makes them available on the returned
|
|
||||||
// context. Variables are available at keys with the prefix "vars.". For
|
|
||||||
// example, if looking for the variable "name", it can be accessed as
|
|
||||||
// "vars.name". Implementations that are accessing values need not know that
|
|
||||||
// the underlying context is implemented with gorilla/mux vars.
|
|
||||||
func WithVars(ctx Context, r *http.Request) Context {
|
|
||||||
return &muxVarsContext{
|
|
||||||
Context: ctx,
|
|
||||||
vars: getVarsFromRequest(r),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRequestLogger returns a logger that contains fields from the request in
|
|
||||||
// the current context. If the request is not available in the context, no
|
|
||||||
// fields will display. Request loggers can safely be pushed onto the context.
|
|
||||||
func GetRequestLogger(ctx Context) Logger {
|
|
||||||
return GetLogger(ctx,
|
|
||||||
"http.request.id",
|
|
||||||
"http.request.method",
|
|
||||||
"http.request.host",
|
|
||||||
"http.request.uri",
|
|
||||||
"http.request.referer",
|
|
||||||
"http.request.useragent",
|
|
||||||
"http.request.remoteaddr",
|
|
||||||
"http.request.contenttype")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetResponseLogger reads the current response stats and builds a logger.
|
|
||||||
// Because the values are read at call time, pushing a logger returned from
|
|
||||||
// this function on the context will lead to missing or invalid data. Only
|
|
||||||
// call this at the end of a request, after the response has been written.
|
|
||||||
func GetResponseLogger(ctx Context) Logger {
|
|
||||||
l := getLogrusLogger(ctx,
|
|
||||||
"http.response.written",
|
|
||||||
"http.response.status",
|
|
||||||
"http.response.contenttype")
|
|
||||||
|
|
||||||
duration := Since(ctx, "http.request.startedat")
|
|
||||||
|
|
||||||
if duration > 0 {
|
|
||||||
l = l.WithField("http.response.duration", duration.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// httpRequestContext makes information about a request available to context.
|
|
||||||
type httpRequestContext struct {
|
|
||||||
Context
|
|
||||||
|
|
||||||
startedAt time.Time
|
|
||||||
id string
|
|
||||||
r *http.Request
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns a keyed element of the request for use in the context. To get
|
|
||||||
// the request itself, query "request". For other components, access them as
|
|
||||||
// "request.<component>". For example, r.RequestURI
|
|
||||||
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "http.request" {
|
|
||||||
return ctx.r
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strings.HasPrefix(keyStr, "http.request.") {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(keyStr, ".")
|
|
||||||
|
|
||||||
if len(parts) != 3 {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
switch parts[2] {
|
|
||||||
case "uri":
|
|
||||||
return ctx.r.RequestURI
|
|
||||||
case "remoteaddr":
|
|
||||||
return RemoteAddr(ctx.r)
|
|
||||||
case "method":
|
|
||||||
return ctx.r.Method
|
|
||||||
case "host":
|
|
||||||
return ctx.r.Host
|
|
||||||
case "referer":
|
|
||||||
referer := ctx.r.Referer()
|
|
||||||
if referer != "" {
|
|
||||||
return referer
|
|
||||||
}
|
|
||||||
case "useragent":
|
|
||||||
return ctx.r.UserAgent()
|
|
||||||
case "id":
|
|
||||||
return ctx.id
|
|
||||||
case "startedat":
|
|
||||||
return ctx.startedAt
|
|
||||||
case "contenttype":
|
|
||||||
ct := ctx.r.Header.Get("Content-Type")
|
|
||||||
if ct != "" {
|
|
||||||
return ct
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fallback:
|
|
||||||
return ctx.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
type muxVarsContext struct {
|
|
||||||
Context
|
|
||||||
vars map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *muxVarsContext) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "vars" {
|
|
||||||
return ctx.vars
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(keyStr, "vars.") {
|
|
||||||
keyStr = strings.TrimPrefix(keyStr, "vars.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := ctx.vars[keyStr]; ok {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctx.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// instrumentedResponseWriterCN provides response writer information in a
|
|
||||||
// context. It implements http.CloseNotifier so that users can detect
|
|
||||||
// early disconnects.
|
|
||||||
type instrumentedResponseWriterCN struct {
|
|
||||||
instrumentedResponseWriter
|
|
||||||
http.CloseNotifier
|
|
||||||
}
|
|
||||||
|
|
||||||
// instrumentedResponseWriter provides response writer information in a
|
|
||||||
// context. This variant is only used in the case where CloseNotifier is not
|
|
||||||
// implemented by the parent ResponseWriter.
|
|
||||||
type instrumentedResponseWriter struct {
|
|
||||||
http.ResponseWriter
|
|
||||||
Context
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
status int
|
|
||||||
written int64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
|
|
||||||
n, err = irw.ResponseWriter.Write(p)
|
|
||||||
|
|
||||||
irw.mu.Lock()
|
|
||||||
irw.written += int64(n)
|
|
||||||
|
|
||||||
// Guess the likely status if not set.
|
|
||||||
if irw.status == 0 {
|
|
||||||
irw.status = http.StatusOK
|
|
||||||
}
|
|
||||||
|
|
||||||
irw.mu.Unlock()
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) WriteHeader(status int) {
|
|
||||||
irw.ResponseWriter.WriteHeader(status)
|
|
||||||
|
|
||||||
irw.mu.Lock()
|
|
||||||
irw.status = status
|
|
||||||
irw.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) Flush() {
|
|
||||||
if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
|
|
||||||
flusher.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "http.response" {
|
|
||||||
return irw
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strings.HasPrefix(keyStr, "http.response.") {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(keyStr, ".")
|
|
||||||
|
|
||||||
if len(parts) != 3 {
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
irw.mu.Lock()
|
|
||||||
defer irw.mu.Unlock()
|
|
||||||
|
|
||||||
switch parts[2] {
|
|
||||||
case "written":
|
|
||||||
return irw.written
|
|
||||||
case "status":
|
|
||||||
return irw.status
|
|
||||||
case "contenttype":
|
|
||||||
contentType := irw.Header().Get("Content-Type")
|
|
||||||
if contentType != "" {
|
|
||||||
return contentType
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fallback:
|
|
||||||
return irw.Context.Value(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
|
|
||||||
if keyStr, ok := key.(string); ok {
|
|
||||||
if keyStr == "http.response" {
|
|
||||||
return irw
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return irw.instrumentedResponseWriter.Value(key)
|
|
||||||
}
|
|
116
vendor/github.com/docker/distribution/context/logger.go
generated
vendored
116
vendor/github.com/docker/distribution/context/logger.go
generated
vendored
|
@ -1,116 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Logger provides a leveled-logging interface.
|
|
||||||
type Logger interface {
|
|
||||||
// standard logger methods
|
|
||||||
Print(args ...interface{})
|
|
||||||
Printf(format string, args ...interface{})
|
|
||||||
Println(args ...interface{})
|
|
||||||
|
|
||||||
Fatal(args ...interface{})
|
|
||||||
Fatalf(format string, args ...interface{})
|
|
||||||
Fatalln(args ...interface{})
|
|
||||||
|
|
||||||
Panic(args ...interface{})
|
|
||||||
Panicf(format string, args ...interface{})
|
|
||||||
Panicln(args ...interface{})
|
|
||||||
|
|
||||||
// Leveled methods, from logrus
|
|
||||||
Debug(args ...interface{})
|
|
||||||
Debugf(format string, args ...interface{})
|
|
||||||
Debugln(args ...interface{})
|
|
||||||
|
|
||||||
Error(args ...interface{})
|
|
||||||
Errorf(format string, args ...interface{})
|
|
||||||
Errorln(args ...interface{})
|
|
||||||
|
|
||||||
Info(args ...interface{})
|
|
||||||
Infof(format string, args ...interface{})
|
|
||||||
Infoln(args ...interface{})
|
|
||||||
|
|
||||||
Warn(args ...interface{})
|
|
||||||
Warnf(format string, args ...interface{})
|
|
||||||
Warnln(args ...interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLogger creates a new context with provided logger.
|
|
||||||
func WithLogger(ctx Context, logger Logger) Context {
|
|
||||||
return WithValue(ctx, "logger", logger)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLoggerWithField returns a logger instance with the specified field key
|
|
||||||
// and value without affecting the context. Extra specified keys will be
|
|
||||||
// resolved from the context.
|
|
||||||
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
|
|
||||||
return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLoggerWithFields returns a logger instance with the specified fields
|
|
||||||
// without affecting the context. Extra specified keys will be resolved from
|
|
||||||
// the context.
|
|
||||||
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
|
|
||||||
// must convert from interface{} -> interface{} to string -> interface{} for logrus.
|
|
||||||
lfields := make(logrus.Fields, len(fields))
|
|
||||||
for key, value := range fields {
|
|
||||||
lfields[fmt.Sprint(key)] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
return getLogrusLogger(ctx, keys...).WithFields(lfields)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLogger returns the logger from the current context, if present. If one
|
|
||||||
// or more keys are provided, they will be resolved on the context and
|
|
||||||
// included in the logger. While context.Value takes an interface, any key
|
|
||||||
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
|
|
||||||
// a logging key field. If context keys are integer constants, for example,
|
|
||||||
// its recommended that a String method is implemented.
|
|
||||||
func GetLogger(ctx Context, keys ...interface{}) Logger {
|
|
||||||
return getLogrusLogger(ctx, keys...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLogrusLogger returns the logrus logger for the context. If one more keys
|
|
||||||
// are provided, they will be resolved on the context and included in the
|
|
||||||
// logger. Only use this function if specific logrus functionality is
|
|
||||||
// required.
|
|
||||||
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
|
|
||||||
var logger *logrus.Entry
|
|
||||||
|
|
||||||
// Get a logger, if it is present.
|
|
||||||
loggerInterface := ctx.Value("logger")
|
|
||||||
if loggerInterface != nil {
|
|
||||||
if lgr, ok := loggerInterface.(*logrus.Entry); ok {
|
|
||||||
logger = lgr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if logger == nil {
|
|
||||||
fields := logrus.Fields{}
|
|
||||||
|
|
||||||
// Fill in the instance id, if we have it.
|
|
||||||
instanceID := ctx.Value("instance.id")
|
|
||||||
if instanceID != nil {
|
|
||||||
fields["instance.id"] = instanceID
|
|
||||||
}
|
|
||||||
|
|
||||||
fields["go.version"] = runtime.Version()
|
|
||||||
// If no logger is found, just return the standard logger.
|
|
||||||
logger = logrus.StandardLogger().WithFields(fields)
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := logrus.Fields{}
|
|
||||||
for _, key := range keys {
|
|
||||||
v := ctx.Value(key)
|
|
||||||
if v != nil {
|
|
||||||
fields[fmt.Sprint(key)] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return logger.WithFields(fields)
|
|
||||||
}
|
|
104
vendor/github.com/docker/distribution/context/trace.go
generated
vendored
104
vendor/github.com/docker/distribution/context/trace.go
generated
vendored
|
@ -1,104 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WithTrace allocates a traced timing span in a new context. This allows a
|
|
||||||
// caller to track the time between calling WithTrace and the returned done
|
|
||||||
// function. When the done function is called, a log message is emitted with a
|
|
||||||
// "trace.duration" field, corresponding to the elapsed time and a
|
|
||||||
// "trace.func" field, corresponding to the function that called WithTrace.
|
|
||||||
//
|
|
||||||
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
|
|
||||||
// dapper-like tracing. This function should be complemented with a WithSpan
|
|
||||||
// method that could be used for tracing distributed RPC calls.
|
|
||||||
//
|
|
||||||
// The main benefit of this function is to post-process log messages or
|
|
||||||
// intercept them in a hook to provide timing data. Trace ids and parent ids
|
|
||||||
// can also be linked to provide call tracing, if so required.
|
|
||||||
//
|
|
||||||
// Here is an example of the usage:
|
|
||||||
//
|
|
||||||
// func timedOperation(ctx Context) {
|
|
||||||
// ctx, done := WithTrace(ctx)
|
|
||||||
// defer done("this will be the log message")
|
|
||||||
// // ... function body ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// If the function ran for roughly 1s, such a usage would emit a log message
|
|
||||||
// as follows:
|
|
||||||
//
|
|
||||||
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
|
|
||||||
//
|
|
||||||
// Notice that the function name is automatically resolved, along with the
|
|
||||||
// package and a trace id is emitted that can be linked with parent ids.
|
|
||||||
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = Background()
|
|
||||||
}
|
|
||||||
|
|
||||||
pc, file, line, _ := runtime.Caller(1)
|
|
||||||
f := runtime.FuncForPC(pc)
|
|
||||||
ctx = &traced{
|
|
||||||
Context: ctx,
|
|
||||||
id: uuid.Generate().String(),
|
|
||||||
start: time.Now(),
|
|
||||||
parent: GetStringValue(ctx, "trace.id"),
|
|
||||||
fnname: f.Name(),
|
|
||||||
file: file,
|
|
||||||
line: line,
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctx, func(format string, a ...interface{}) {
|
|
||||||
GetLogger(ctx,
|
|
||||||
"trace.duration",
|
|
||||||
"trace.id",
|
|
||||||
"trace.parent.id",
|
|
||||||
"trace.func",
|
|
||||||
"trace.file",
|
|
||||||
"trace.line").
|
|
||||||
Debugf(format, a...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// traced represents a context that is traced for function call timing. It
|
|
||||||
// also provides fast lookup for the various attributes that are available on
|
|
||||||
// the trace.
|
|
||||||
type traced struct {
|
|
||||||
Context
|
|
||||||
id string
|
|
||||||
parent string
|
|
||||||
start time.Time
|
|
||||||
fnname string
|
|
||||||
file string
|
|
||||||
line int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *traced) Value(key interface{}) interface{} {
|
|
||||||
switch key {
|
|
||||||
case "trace.start":
|
|
||||||
return ts.start
|
|
||||||
case "trace.duration":
|
|
||||||
return time.Since(ts.start)
|
|
||||||
case "trace.id":
|
|
||||||
return ts.id
|
|
||||||
case "trace.parent.id":
|
|
||||||
if ts.parent == "" {
|
|
||||||
return nil // must return nil to signal no parent.
|
|
||||||
}
|
|
||||||
|
|
||||||
return ts.parent
|
|
||||||
case "trace.func":
|
|
||||||
return ts.fnname
|
|
||||||
case "trace.file":
|
|
||||||
return ts.file
|
|
||||||
case "trace.line":
|
|
||||||
return ts.line
|
|
||||||
}
|
|
||||||
|
|
||||||
return ts.Context.Value(key)
|
|
||||||
}
|
|
24
vendor/github.com/docker/distribution/context/util.go
generated
vendored
24
vendor/github.com/docker/distribution/context/util.go
generated
vendored
|
@ -1,24 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Since looks up key, which should be a time.Time, and returns the duration
|
|
||||||
// since that time. If the key is not found, the value returned will be zero.
|
|
||||||
// This is helpful when inferring metrics related to context execution times.
|
|
||||||
func Since(ctx Context, key interface{}) time.Duration {
|
|
||||||
if startedAt, ok := ctx.Value(key).(time.Time); ok {
|
|
||||||
return time.Since(startedAt)
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStringValue returns a string value from the context. The empty string
|
|
||||||
// will be returned if not found.
|
|
||||||
func GetStringValue(ctx Context, key interface{}) (value string) {
|
|
||||||
if valuev, ok := ctx.Value(key).(string); ok {
|
|
||||||
value = valuev
|
|
||||||
}
|
|
||||||
return value
|
|
||||||
}
|
|
16
vendor/github.com/docker/distribution/context/version.go
generated
vendored
16
vendor/github.com/docker/distribution/context/version.go
generated
vendored
|
@ -1,16 +0,0 @@
|
||||||
package context
|
|
||||||
|
|
||||||
// WithVersion stores the application version in the context. The new context
|
|
||||||
// gets a logger to ensure log messages are marked with the application
|
|
||||||
// version.
|
|
||||||
func WithVersion(ctx Context, version string) Context {
|
|
||||||
ctx = WithValue(ctx, "version", version)
|
|
||||||
// push a new logger onto the stack
|
|
||||||
return WithLogger(ctx, GetLogger(ctx, "version"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVersion returns the application version from the context. An empty
|
|
||||||
// string may returned if the version was not set on the context.
|
|
||||||
func GetVersion(ctx Context) string {
|
|
||||||
return GetStringValue(ctx, "version")
|
|
||||||
}
|
|
267
vendor/github.com/docker/distribution/registry/api/errcode/errors.go
generated
vendored
267
vendor/github.com/docker/distribution/registry/api/errcode/errors.go
generated
vendored
|
@ -1,267 +0,0 @@
|
||||||
package errcode
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrorCoder is the base interface for ErrorCode and Error allowing
|
|
||||||
// users of each to just call ErrorCode to get the real ID of each
|
|
||||||
type ErrorCoder interface {
|
|
||||||
ErrorCode() ErrorCode
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorCode represents the error type. The errors are serialized via strings
|
|
||||||
// and the integer format may change and should *never* be exported.
|
|
||||||
type ErrorCode int
|
|
||||||
|
|
||||||
var _ error = ErrorCode(0)
|
|
||||||
|
|
||||||
// ErrorCode just returns itself
|
|
||||||
func (ec ErrorCode) ErrorCode() ErrorCode {
|
|
||||||
return ec
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the ID/Value
|
|
||||||
func (ec ErrorCode) Error() string {
|
|
||||||
// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
|
|
||||||
return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Descriptor returns the descriptor for the error code.
|
|
||||||
func (ec ErrorCode) Descriptor() ErrorDescriptor {
|
|
||||||
d, ok := errorCodeToDescriptors[ec]
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
return ErrorCodeUnknown.Descriptor()
|
|
||||||
}
|
|
||||||
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the canonical identifier for this error code.
|
|
||||||
func (ec ErrorCode) String() string {
|
|
||||||
return ec.Descriptor().Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message returned the human-readable error message for this error code.
|
|
||||||
func (ec ErrorCode) Message() string {
|
|
||||||
return ec.Descriptor().Message
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
|
|
||||||
// result.
|
|
||||||
func (ec ErrorCode) MarshalText() (text []byte, err error) {
|
|
||||||
return []byte(ec.String()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText decodes the form generated by MarshalText.
|
|
||||||
func (ec *ErrorCode) UnmarshalText(text []byte) error {
|
|
||||||
desc, ok := idToDescriptors[string(text)]
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
desc = ErrorCodeUnknown.Descriptor()
|
|
||||||
}
|
|
||||||
|
|
||||||
*ec = desc.Code
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMessage creates a new Error struct based on the passed-in info and
|
|
||||||
// overrides the Message property.
|
|
||||||
func (ec ErrorCode) WithMessage(message string) Error {
|
|
||||||
return Error{
|
|
||||||
Code: ec,
|
|
||||||
Message: message,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDetail creates a new Error struct based on the passed-in info and
|
|
||||||
// set the Detail property appropriately
|
|
||||||
func (ec ErrorCode) WithDetail(detail interface{}) Error {
|
|
||||||
return Error{
|
|
||||||
Code: ec,
|
|
||||||
Message: ec.Message(),
|
|
||||||
}.WithDetail(detail)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithArgs creates a new Error struct and sets the Args slice
|
|
||||||
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
|
|
||||||
return Error{
|
|
||||||
Code: ec,
|
|
||||||
Message: ec.Message(),
|
|
||||||
}.WithArgs(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error provides a wrapper around ErrorCode with extra Details provided.
|
|
||||||
type Error struct {
|
|
||||||
Code ErrorCode `json:"code"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
Detail interface{} `json:"detail,omitempty"`
|
|
||||||
|
|
||||||
// TODO(duglin): See if we need an "args" property so we can do the
|
|
||||||
// variable substitution right before showing the message to the user
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ error = Error{}
|
|
||||||
|
|
||||||
// ErrorCode returns the ID/Value of this Error
|
|
||||||
func (e Error) ErrorCode() ErrorCode {
|
|
||||||
return e.Code
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns a human readable representation of the error.
|
|
||||||
func (e Error) Error() string {
|
|
||||||
return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDetail will return a new Error, based on the current one, but with
|
|
||||||
// some Detail info added
|
|
||||||
func (e Error) WithDetail(detail interface{}) Error {
|
|
||||||
return Error{
|
|
||||||
Code: e.Code,
|
|
||||||
Message: e.Message,
|
|
||||||
Detail: detail,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithArgs uses the passed-in list of interface{} as the substitution
|
|
||||||
// variables in the Error's Message string, but returns a new Error
|
|
||||||
func (e Error) WithArgs(args ...interface{}) Error {
|
|
||||||
return Error{
|
|
||||||
Code: e.Code,
|
|
||||||
Message: fmt.Sprintf(e.Code.Message(), args...),
|
|
||||||
Detail: e.Detail,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorDescriptor provides relevant information about a given error code.
|
|
||||||
type ErrorDescriptor struct {
|
|
||||||
// Code is the error code that this descriptor describes.
|
|
||||||
Code ErrorCode
|
|
||||||
|
|
||||||
// Value provides a unique, string key, often captilized with
|
|
||||||
// underscores, to identify the error code. This value is used as the
|
|
||||||
// keyed value when serializing api errors.
|
|
||||||
Value string
|
|
||||||
|
|
||||||
// Message is a short, human readable decription of the error condition
|
|
||||||
// included in API responses.
|
|
||||||
Message string
|
|
||||||
|
|
||||||
// Description provides a complete account of the errors purpose, suitable
|
|
||||||
// for use in documentation.
|
|
||||||
Description string
|
|
||||||
|
|
||||||
// HTTPStatusCode provides the http status code that is associated with
|
|
||||||
// this error condition.
|
|
||||||
HTTPStatusCode int
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseErrorCode returns the value by the string error code.
|
|
||||||
// `ErrorCodeUnknown` will be returned if the error is not known.
|
|
||||||
func ParseErrorCode(value string) ErrorCode {
|
|
||||||
ed, ok := idToDescriptors[value]
|
|
||||||
if ok {
|
|
||||||
return ed.Code
|
|
||||||
}
|
|
||||||
|
|
||||||
return ErrorCodeUnknown
|
|
||||||
}
|
|
||||||
|
|
||||||
// Errors provides the envelope for multiple errors and a few sugar methods
|
|
||||||
// for use within the application.
|
|
||||||
type Errors []error
|
|
||||||
|
|
||||||
var _ error = Errors{}
|
|
||||||
|
|
||||||
func (errs Errors) Error() string {
|
|
||||||
switch len(errs) {
|
|
||||||
case 0:
|
|
||||||
return "<nil>"
|
|
||||||
case 1:
|
|
||||||
return errs[0].Error()
|
|
||||||
default:
|
|
||||||
msg := "errors:\n"
|
|
||||||
for _, err := range errs {
|
|
||||||
msg += err.Error() + "\n"
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the current number of errors.
|
|
||||||
func (errs Errors) Len() int {
|
|
||||||
return len(errs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON converts slice of error, ErrorCode or Error into a
|
|
||||||
// slice of Error - then serializes
|
|
||||||
func (errs Errors) MarshalJSON() ([]byte, error) {
|
|
||||||
var tmpErrs struct {
|
|
||||||
Errors []Error `json:"errors,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, daErr := range errs {
|
|
||||||
var err Error
|
|
||||||
|
|
||||||
switch daErr.(type) {
|
|
||||||
case ErrorCode:
|
|
||||||
err = daErr.(ErrorCode).WithDetail(nil)
|
|
||||||
case Error:
|
|
||||||
err = daErr.(Error)
|
|
||||||
default:
|
|
||||||
err = ErrorCodeUnknown.WithDetail(daErr)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the Error struct was setup and they forgot to set the
|
|
||||||
// Message field (meaning its "") then grab it from the ErrCode
|
|
||||||
msg := err.Message
|
|
||||||
if msg == "" {
|
|
||||||
msg = err.Code.Message()
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpErrs.Errors = append(tmpErrs.Errors, Error{
|
|
||||||
Code: err.Code,
|
|
||||||
Message: msg,
|
|
||||||
Detail: err.Detail,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(tmpErrs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON deserializes []Error and then converts it into slice of
|
|
||||||
// Error or ErrorCode
|
|
||||||
func (errs *Errors) UnmarshalJSON(data []byte) error {
|
|
||||||
var tmpErrs struct {
|
|
||||||
Errors []Error
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.Unmarshal(data, &tmpErrs); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var newErrs Errors
|
|
||||||
for _, daErr := range tmpErrs.Errors {
|
|
||||||
// If Message is empty or exactly matches the Code's message string
|
|
||||||
// then just use the Code, no need for a full Error struct
|
|
||||||
if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
|
|
||||||
// Error's w/o details get converted to ErrorCode
|
|
||||||
newErrs = append(newErrs, daErr.Code)
|
|
||||||
} else {
|
|
||||||
// Error's w/ details are untouched
|
|
||||||
newErrs = append(newErrs, Error{
|
|
||||||
Code: daErr.Code,
|
|
||||||
Message: daErr.Message,
|
|
||||||
Detail: daErr.Detail,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
*errs = newErrs
|
|
||||||
return nil
|
|
||||||
}
|
|
44
vendor/github.com/docker/distribution/registry/api/errcode/handler.go
generated
vendored
44
vendor/github.com/docker/distribution/registry/api/errcode/handler.go
generated
vendored
|
@ -1,44 +0,0 @@
|
||||||
package errcode
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
|
|
||||||
// and sets the content-type header to 'application/json'. It will handle
|
|
||||||
// ErrorCoder and Errors, and if necessary will create an envelope.
|
|
||||||
func ServeJSON(w http.ResponseWriter, err error) error {
|
|
||||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
||||||
var sc int
|
|
||||||
|
|
||||||
switch errs := err.(type) {
|
|
||||||
case Errors:
|
|
||||||
if len(errs) < 1 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if err, ok := errs[0].(ErrorCoder); ok {
|
|
||||||
sc = err.ErrorCode().Descriptor().HTTPStatusCode
|
|
||||||
}
|
|
||||||
case ErrorCoder:
|
|
||||||
sc = errs.ErrorCode().Descriptor().HTTPStatusCode
|
|
||||||
err = Errors{err} // create an envelope.
|
|
||||||
default:
|
|
||||||
// We just have an unhandled error type, so just place in an envelope
|
|
||||||
// and move along.
|
|
||||||
err = Errors{err}
|
|
||||||
}
|
|
||||||
|
|
||||||
if sc == 0 {
|
|
||||||
sc = http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(sc)
|
|
||||||
|
|
||||||
if err := json.NewEncoder(w).Encode(err); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
138
vendor/github.com/docker/distribution/registry/api/errcode/register.go
generated
vendored
138
vendor/github.com/docker/distribution/registry/api/errcode/register.go
generated
vendored
|
@ -1,138 +0,0 @@
|
||||||
package errcode
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
|
|
||||||
idToDescriptors = map[string]ErrorDescriptor{}
|
|
||||||
groupToDescriptors = map[string][]ErrorDescriptor{}
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrorCodeUnknown is a generic error that can be used as a last
|
|
||||||
// resort if there is no situation-specific error message that can be used
|
|
||||||
ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
|
|
||||||
Value: "UNKNOWN",
|
|
||||||
Message: "unknown error",
|
|
||||||
Description: `Generic error returned when the error does not have an
|
|
||||||
API classification.`,
|
|
||||||
HTTPStatusCode: http.StatusInternalServerError,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeUnsupported is returned when an operation is not supported.
|
|
||||||
ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
|
|
||||||
Value: "UNSUPPORTED",
|
|
||||||
Message: "The operation is unsupported.",
|
|
||||||
Description: `The operation was unsupported due to a missing
|
|
||||||
implementation or invalid set of parameters.`,
|
|
||||||
HTTPStatusCode: http.StatusMethodNotAllowed,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeUnauthorized is returned if a request requires
|
|
||||||
// authentication.
|
|
||||||
ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
|
|
||||||
Value: "UNAUTHORIZED",
|
|
||||||
Message: "authentication required",
|
|
||||||
Description: `The access controller was unable to authenticate
|
|
||||||
the client. Often this will be accompanied by a
|
|
||||||
Www-Authenticate HTTP response header indicating how to
|
|
||||||
authenticate.`,
|
|
||||||
HTTPStatusCode: http.StatusUnauthorized,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeDenied is returned if a client does not have sufficient
|
|
||||||
// permission to perform an action.
|
|
||||||
ErrorCodeDenied = Register("errcode", ErrorDescriptor{
|
|
||||||
Value: "DENIED",
|
|
||||||
Message: "requested access to the resource is denied",
|
|
||||||
Description: `The access controller denied access for the
|
|
||||||
operation on a resource.`,
|
|
||||||
HTTPStatusCode: http.StatusForbidden,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeUnavailable provides a common error to report unavailability
|
|
||||||
// of a service or endpoint.
|
|
||||||
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
|
|
||||||
Value: "UNAVAILABLE",
|
|
||||||
Message: "service unavailable",
|
|
||||||
Description: "Returned when a service is not available",
|
|
||||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeTooManyRequests is returned if a client attempts too many
|
|
||||||
// times to contact a service endpoint.
|
|
||||||
ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
|
|
||||||
Value: "TOOMANYREQUESTS",
|
|
||||||
Message: "too many requests",
|
|
||||||
Description: `Returned when a client attempts to contact a
|
|
||||||
service too many times`,
|
|
||||||
HTTPStatusCode: http.StatusTooManyRequests,
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
var nextCode = 1000
|
|
||||||
var registerLock sync.Mutex
|
|
||||||
|
|
||||||
// Register will make the passed-in error known to the environment and
|
|
||||||
// return a new ErrorCode
|
|
||||||
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
|
|
||||||
registerLock.Lock()
|
|
||||||
defer registerLock.Unlock()
|
|
||||||
|
|
||||||
descriptor.Code = ErrorCode(nextCode)
|
|
||||||
|
|
||||||
if _, ok := idToDescriptors[descriptor.Value]; ok {
|
|
||||||
panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
|
|
||||||
}
|
|
||||||
if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
|
|
||||||
panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
|
|
||||||
}
|
|
||||||
|
|
||||||
groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
|
|
||||||
errorCodeToDescriptors[descriptor.Code] = descriptor
|
|
||||||
idToDescriptors[descriptor.Value] = descriptor
|
|
||||||
|
|
||||||
nextCode++
|
|
||||||
return descriptor.Code
|
|
||||||
}
|
|
||||||
|
|
||||||
type byValue []ErrorDescriptor
|
|
||||||
|
|
||||||
func (a byValue) Len() int { return len(a) }
|
|
||||||
func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
||||||
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
|
||||||
|
|
||||||
// GetGroupNames returns the list of Error group names that are registered
|
|
||||||
func GetGroupNames() []string {
|
|
||||||
keys := []string{}
|
|
||||||
|
|
||||||
for k := range groupToDescriptors {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetErrorCodeGroup returns the named group of error descriptors
|
|
||||||
func GetErrorCodeGroup(name string) []ErrorDescriptor {
|
|
||||||
desc := groupToDescriptors[name]
|
|
||||||
sort.Sort(byValue(desc))
|
|
||||||
return desc
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
|
|
||||||
// registered, irrespective of what group they're in
|
|
||||||
func GetErrorAllDescriptors() []ErrorDescriptor {
|
|
||||||
result := []ErrorDescriptor{}
|
|
||||||
|
|
||||||
for _, group := range GetGroupNames() {
|
|
||||||
result = append(result, GetErrorCodeGroup(group)...)
|
|
||||||
}
|
|
||||||
sort.Sort(byValue(result))
|
|
||||||
return result
|
|
||||||
}
|
|
1596
vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
generated
vendored
1596
vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
generated
vendored
File diff suppressed because it is too large
Load diff
9
vendor/github.com/docker/distribution/registry/api/v2/doc.go
generated
vendored
9
vendor/github.com/docker/distribution/registry/api/v2/doc.go
generated
vendored
|
@ -1,9 +0,0 @@
|
||||||
// Package v2 describes routes, urls and the error codes used in the Docker
|
|
||||||
// Registry JSON HTTP API V2. In addition to declarations, descriptors are
|
|
||||||
// provided for routes and error codes that can be used for implementation and
|
|
||||||
// automatically generating documentation.
|
|
||||||
//
|
|
||||||
// Definitions here are considered to be locked down for the V2 registry api.
|
|
||||||
// Any changes must be considered carefully and should not proceed without a
|
|
||||||
// change proposal in docker core.
|
|
||||||
package v2
|
|
136
vendor/github.com/docker/distribution/registry/api/v2/errors.go
generated
vendored
136
vendor/github.com/docker/distribution/registry/api/v2/errors.go
generated
vendored
|
@ -1,136 +0,0 @@
|
||||||
package v2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/registry/api/errcode"
|
|
||||||
)
|
|
||||||
|
|
||||||
const errGroup = "registry.api.v2"
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrorCodeDigestInvalid is returned when uploading a blob if the
|
|
||||||
// provided digest does not match the blob contents.
|
|
||||||
ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "DIGEST_INVALID",
|
|
||||||
Message: "provided digest did not match uploaded content",
|
|
||||||
Description: `When a blob is uploaded, the registry will check that
|
|
||||||
the content matches the digest provided by the client. The error may
|
|
||||||
include a detail structure with the key "digest", including the
|
|
||||||
invalid digest string. This error may also be returned when a manifest
|
|
||||||
includes an invalid layer digest.`,
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
|
|
||||||
ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "SIZE_INVALID",
|
|
||||||
Message: "provided length did not match content length",
|
|
||||||
Description: `When a layer is uploaded, the provided size will be
|
|
||||||
checked against the uploaded content. If they do not match, this error
|
|
||||||
will be returned.`,
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeNameInvalid is returned when the name in the manifest does not
|
|
||||||
// match the provided name.
|
|
||||||
ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "NAME_INVALID",
|
|
||||||
Message: "invalid repository name",
|
|
||||||
Description: `Invalid repository name encountered either during
|
|
||||||
manifest validation or any API operation.`,
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeTagInvalid is returned when the tag in the manifest does not
|
|
||||||
// match the provided tag.
|
|
||||||
ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "TAG_INVALID",
|
|
||||||
Message: "manifest tag did not match URI",
|
|
||||||
Description: `During a manifest upload, if the tag in the manifest
|
|
||||||
does not match the uri tag, this error will be returned.`,
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeNameUnknown when the repository name is not known.
|
|
||||||
ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "NAME_UNKNOWN",
|
|
||||||
Message: "repository name not known to registry",
|
|
||||||
Description: `This is returned if the name used during an operation is
|
|
||||||
unknown to the registry.`,
|
|
||||||
HTTPStatusCode: http.StatusNotFound,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeManifestUnknown returned when image manifest is unknown.
|
|
||||||
ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "MANIFEST_UNKNOWN",
|
|
||||||
Message: "manifest unknown",
|
|
||||||
Description: `This error is returned when the manifest, identified by
|
|
||||||
name and tag is unknown to the repository.`,
|
|
||||||
HTTPStatusCode: http.StatusNotFound,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeManifestInvalid returned when an image manifest is invalid,
|
|
||||||
// typically during a PUT operation. This error encompasses all errors
|
|
||||||
// encountered during manifest validation that aren't signature errors.
|
|
||||||
ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "MANIFEST_INVALID",
|
|
||||||
Message: "manifest invalid",
|
|
||||||
Description: `During upload, manifests undergo several checks ensuring
|
|
||||||
validity. If those checks fail, this error may be returned, unless a
|
|
||||||
more specific error is included. The detail will contain information
|
|
||||||
the failed validation.`,
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeManifestUnverified is returned when the manifest fails
|
|
||||||
// signature verification.
|
|
||||||
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "MANIFEST_UNVERIFIED",
|
|
||||||
Message: "manifest failed signature verification",
|
|
||||||
Description: `During manifest upload, if the manifest fails signature
|
|
||||||
verification, this error will be returned.`,
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
|
|
||||||
// unknown to the registry.
|
|
||||||
ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "MANIFEST_BLOB_UNKNOWN",
|
|
||||||
Message: "blob unknown to registry",
|
|
||||||
Description: `This error may be returned when a manifest blob is
|
|
||||||
unknown to the registry.`,
|
|
||||||
HTTPStatusCode: http.StatusBadRequest,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeBlobUnknown is returned when a blob is unknown to the
|
|
||||||
// registry. This can happen when the manifest references a nonexistent
|
|
||||||
// layer or the result is not found by a blob fetch.
|
|
||||||
ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "BLOB_UNKNOWN",
|
|
||||||
Message: "blob unknown to registry",
|
|
||||||
Description: `This error may be returned when a blob is unknown to the
|
|
||||||
registry in a specified repository. This can be returned with a
|
|
||||||
standard get or if a manifest references an unknown layer during
|
|
||||||
upload.`,
|
|
||||||
HTTPStatusCode: http.StatusNotFound,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
|
|
||||||
ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "BLOB_UPLOAD_UNKNOWN",
|
|
||||||
Message: "blob upload unknown to registry",
|
|
||||||
Description: `If a blob upload has been cancelled or was never
|
|
||||||
started, this error code may be returned.`,
|
|
||||||
HTTPStatusCode: http.StatusNotFound,
|
|
||||||
})
|
|
||||||
|
|
||||||
// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
|
|
||||||
ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
|
||||||
Value: "BLOB_UPLOAD_INVALID",
|
|
||||||
Message: "blob upload invalid",
|
|
||||||
Description: `The blob upload encountered an error and can no
|
|
||||||
longer proceed.`,
|
|
||||||
HTTPStatusCode: http.StatusNotFound,
|
|
||||||
})
|
|
||||||
)
|
|
161
vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
generated
vendored
161
vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
generated
vendored
|
@ -1,161 +0,0 @@
|
||||||
package v2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// according to rfc7230
|
|
||||||
reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
|
|
||||||
reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
|
|
||||||
reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains
|
|
||||||
// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The
|
|
||||||
// function parses only the first element of the list, which is set by the very first proxy. It returns a map
|
|
||||||
// of corresponding key-value pairs and an unparsed slice of the input string.
|
|
||||||
//
|
|
||||||
// Examples of Forwarded header values:
|
|
||||||
//
|
|
||||||
// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
|
|
||||||
// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
|
|
||||||
//
|
|
||||||
// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
|
|
||||||
// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
|
|
||||||
func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
|
|
||||||
// Following are states of forwarded header parser. Any state could transition to a failure.
|
|
||||||
const (
|
|
||||||
// terminating state; can transition to Parameter
|
|
||||||
stateElement = iota
|
|
||||||
// terminating state; can transition to KeyValueDelimiter
|
|
||||||
stateParameter
|
|
||||||
// can transition to Value
|
|
||||||
stateKeyValueDelimiter
|
|
||||||
// can transition to one of { QuotedValue, PairEnd }
|
|
||||||
stateValue
|
|
||||||
// can transition to one of { EscapedCharacter, PairEnd }
|
|
||||||
stateQuotedValue
|
|
||||||
// can transition to one of { QuotedValue }
|
|
||||||
stateEscapedCharacter
|
|
||||||
// terminating state; can transition to one of { Parameter, Element }
|
|
||||||
statePairEnd
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
parameter string
|
|
||||||
value string
|
|
||||||
parse = forwarded[:]
|
|
||||||
res = map[string]string{}
|
|
||||||
state = stateElement
|
|
||||||
)
|
|
||||||
|
|
||||||
Loop:
|
|
||||||
for {
|
|
||||||
// skip spaces unless in quoted value
|
|
||||||
if state != stateQuotedValue && state != stateEscapedCharacter {
|
|
||||||
parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(parse) == 0 {
|
|
||||||
if state != stateElement && state != statePairEnd && state != stateParameter {
|
|
||||||
return nil, parse, fmt.Errorf("unexpected end of input")
|
|
||||||
}
|
|
||||||
// terminating
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
switch state {
|
|
||||||
// terminate at list element delimiter
|
|
||||||
case stateElement:
|
|
||||||
if parse[0] == ',' {
|
|
||||||
parse = parse[1:]
|
|
||||||
break Loop
|
|
||||||
}
|
|
||||||
state = stateParameter
|
|
||||||
|
|
||||||
// parse parameter (the key of key-value pair)
|
|
||||||
case stateParameter:
|
|
||||||
match := reToken.FindString(parse)
|
|
||||||
if len(match) == 0 {
|
|
||||||
return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
|
|
||||||
}
|
|
||||||
parameter = strings.ToLower(match)
|
|
||||||
parse = parse[len(match):]
|
|
||||||
state = stateKeyValueDelimiter
|
|
||||||
|
|
||||||
// parse '='
|
|
||||||
case stateKeyValueDelimiter:
|
|
||||||
if parse[0] != '=' {
|
|
||||||
return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
|
|
||||||
}
|
|
||||||
parse = parse[1:]
|
|
||||||
state = stateValue
|
|
||||||
|
|
||||||
// parse value or quoted value
|
|
||||||
case stateValue:
|
|
||||||
if parse[0] == '"' {
|
|
||||||
parse = parse[1:]
|
|
||||||
state = stateQuotedValue
|
|
||||||
} else {
|
|
||||||
value = reToken.FindString(parse)
|
|
||||||
if len(value) == 0 {
|
|
||||||
return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
|
|
||||||
}
|
|
||||||
if _, exists := res[parameter]; exists {
|
|
||||||
return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
|
|
||||||
}
|
|
||||||
res[parameter] = value
|
|
||||||
parse = parse[len(value):]
|
|
||||||
value = ""
|
|
||||||
state = statePairEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse a part of quoted value until the first backslash
|
|
||||||
case stateQuotedValue:
|
|
||||||
match := reQuotedValue.FindString(parse)
|
|
||||||
value += match
|
|
||||||
parse = parse[len(match):]
|
|
||||||
switch {
|
|
||||||
case len(parse) == 0:
|
|
||||||
return nil, parse, fmt.Errorf("unterminated quoted string")
|
|
||||||
case parse[0] == '"':
|
|
||||||
res[parameter] = value
|
|
||||||
value = ""
|
|
||||||
parse = parse[1:]
|
|
||||||
state = statePairEnd
|
|
||||||
case parse[0] == '\\':
|
|
||||||
parse = parse[1:]
|
|
||||||
state = stateEscapedCharacter
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse escaped character in a quoted string, ignore the backslash
|
|
||||||
// transition back to QuotedValue state
|
|
||||||
case stateEscapedCharacter:
|
|
||||||
c := reEscapedCharacter.FindString(parse)
|
|
||||||
if len(c) == 0 {
|
|
||||||
return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
|
|
||||||
}
|
|
||||||
value += c
|
|
||||||
parse = parse[1:]
|
|
||||||
state = stateQuotedValue
|
|
||||||
|
|
||||||
// expect either a new key-value pair, new list or end of input
|
|
||||||
case statePairEnd:
|
|
||||||
switch parse[0] {
|
|
||||||
case ';':
|
|
||||||
parse = parse[1:]
|
|
||||||
state = stateParameter
|
|
||||||
case ',':
|
|
||||||
state = stateElement
|
|
||||||
default:
|
|
||||||
return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, parse, nil
|
|
||||||
}
|
|
49
vendor/github.com/docker/distribution/registry/api/v2/routes.go
generated
vendored
49
vendor/github.com/docker/distribution/registry/api/v2/routes.go
generated
vendored
|
@ -1,49 +0,0 @@
|
||||||
package v2
|
|
||||||
|
|
||||||
import "github.com/gorilla/mux"
|
|
||||||
|
|
||||||
// The following are definitions of the name under which all V2 routes are
|
|
||||||
// registered. These symbols can be used to look up a route based on the name.
|
|
||||||
const (
|
|
||||||
RouteNameBase = "base"
|
|
||||||
RouteNameManifest = "manifest"
|
|
||||||
RouteNameTags = "tags"
|
|
||||||
RouteNameBlob = "blob"
|
|
||||||
RouteNameBlobUpload = "blob-upload"
|
|
||||||
RouteNameBlobUploadChunk = "blob-upload-chunk"
|
|
||||||
RouteNameCatalog = "catalog"
|
|
||||||
)
|
|
||||||
|
|
||||||
var allEndpoints = []string{
|
|
||||||
RouteNameManifest,
|
|
||||||
RouteNameCatalog,
|
|
||||||
RouteNameTags,
|
|
||||||
RouteNameBlob,
|
|
||||||
RouteNameBlobUpload,
|
|
||||||
RouteNameBlobUploadChunk,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Router builds a gorilla router with named routes for the various API
|
|
||||||
// methods. This can be used directly by both server implementations and
|
|
||||||
// clients.
|
|
||||||
func Router() *mux.Router {
|
|
||||||
return RouterWithPrefix("")
|
|
||||||
}
|
|
||||||
|
|
||||||
// RouterWithPrefix builds a gorilla router with a configured prefix
|
|
||||||
// on all routes.
|
|
||||||
func RouterWithPrefix(prefix string) *mux.Router {
|
|
||||||
rootRouter := mux.NewRouter()
|
|
||||||
router := rootRouter
|
|
||||||
if prefix != "" {
|
|
||||||
router = router.PathPrefix(prefix).Subrouter()
|
|
||||||
}
|
|
||||||
|
|
||||||
router.StrictSlash(true)
|
|
||||||
|
|
||||||
for _, descriptor := range routeDescriptors {
|
|
||||||
router.Path(descriptor.Path).Name(descriptor.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
return rootRouter
|
|
||||||
}
|
|
314
vendor/github.com/docker/distribution/registry/api/v2/urls.go
generated
vendored
314
vendor/github.com/docker/distribution/registry/api/v2/urls.go
generated
vendored
|
@ -1,314 +0,0 @@
|
||||||
package v2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
)
|
|
||||||
|
|
||||||
// URLBuilder creates registry API urls from a single base endpoint. It can be
|
|
||||||
// used to create urls for use in a registry client or server.
|
|
||||||
//
|
|
||||||
// All urls will be created from the given base, including the api version.
|
|
||||||
// For example, if a root of "/foo/" is provided, urls generated will be fall
|
|
||||||
// under "/foo/v2/...". Most application will only provide a schema, host and
|
|
||||||
// port, such as "https://localhost:5000/".
|
|
||||||
type URLBuilder struct {
|
|
||||||
root *url.URL // url root (ie http://localhost/)
|
|
||||||
router *mux.Router
|
|
||||||
relative bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewURLBuilder creates a URLBuilder with provided root url object.
|
|
||||||
func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
|
|
||||||
return &URLBuilder{
|
|
||||||
root: root,
|
|
||||||
router: Router(),
|
|
||||||
relative: relative,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewURLBuilderFromString workes identically to NewURLBuilder except it takes
|
|
||||||
// a string argument for the root, returning an error if it is not a valid
|
|
||||||
// url.
|
|
||||||
func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
|
|
||||||
u, err := url.Parse(root)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewURLBuilder(u, relative), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewURLBuilderFromRequest uses information from an *http.Request to
|
|
||||||
// construct the root url.
|
|
||||||
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
|
|
||||||
var scheme string
|
|
||||||
|
|
||||||
forwardedProto := r.Header.Get("X-Forwarded-Proto")
|
|
||||||
// TODO: log the error
|
|
||||||
forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded"))
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case len(forwardedProto) > 0:
|
|
||||||
scheme = forwardedProto
|
|
||||||
case len(forwardedHeader["proto"]) > 0:
|
|
||||||
scheme = forwardedHeader["proto"]
|
|
||||||
case r.TLS != nil:
|
|
||||||
scheme = "https"
|
|
||||||
case len(r.URL.Scheme) > 0:
|
|
||||||
scheme = r.URL.Scheme
|
|
||||||
default:
|
|
||||||
scheme = "http"
|
|
||||||
}
|
|
||||||
|
|
||||||
host := r.Host
|
|
||||||
|
|
||||||
if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
|
|
||||||
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
|
|
||||||
// comma-separated list of hosts, to which each proxy appends the
|
|
||||||
// requested host. We want to grab the first from this comma-separated
|
|
||||||
// list.
|
|
||||||
hosts := strings.SplitN(forwardedHost, ",", 2)
|
|
||||||
host = strings.TrimSpace(hosts[0])
|
|
||||||
} else if addr, exists := forwardedHeader["for"]; exists {
|
|
||||||
host = addr
|
|
||||||
} else if h, exists := forwardedHeader["host"]; exists {
|
|
||||||
host = h
|
|
||||||
}
|
|
||||||
|
|
||||||
portLessHost, port := host, ""
|
|
||||||
if !isIPv6Address(portLessHost) {
|
|
||||||
// with go 1.6, this would treat the last part of IPv6 address as a port
|
|
||||||
portLessHost, port, _ = net.SplitHostPort(host)
|
|
||||||
}
|
|
||||||
if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 {
|
|
||||||
ports := strings.SplitN(forwardedPort, ",", 2)
|
|
||||||
forwardedPort = strings.TrimSpace(ports[0])
|
|
||||||
if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil {
|
|
||||||
port = forwardedPort
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(portLessHost) > 0 {
|
|
||||||
host = portLessHost
|
|
||||||
}
|
|
||||||
if len(port) > 0 {
|
|
||||||
// remove enclosing brackets of ipv6 address otherwise they will be duplicated
|
|
||||||
if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
|
|
||||||
host = host[1 : len(host)-1]
|
|
||||||
}
|
|
||||||
// JoinHostPort properly encloses ipv6 addresses in square brackets
|
|
||||||
host = net.JoinHostPort(host, port)
|
|
||||||
} else if isIPv6Address(host) && host[0] != '[' {
|
|
||||||
// ipv6 needs to be enclosed in square brackets in urls
|
|
||||||
host = "[" + host + "]"
|
|
||||||
}
|
|
||||||
|
|
||||||
basePath := routeDescriptorsMap[RouteNameBase].Path
|
|
||||||
|
|
||||||
requestPath := r.URL.Path
|
|
||||||
index := strings.Index(requestPath, basePath)
|
|
||||||
|
|
||||||
u := &url.URL{
|
|
||||||
Scheme: scheme,
|
|
||||||
Host: host,
|
|
||||||
}
|
|
||||||
|
|
||||||
if index > 0 {
|
|
||||||
// N.B. index+1 is important because we want to include the trailing /
|
|
||||||
u.Path = requestPath[0 : index+1]
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewURLBuilder(u, relative)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildBaseURL constructs a base url for the API, typically just "/v2/".
|
|
||||||
func (ub *URLBuilder) BuildBaseURL() (string, error) {
|
|
||||||
route := ub.cloneRoute(RouteNameBase)
|
|
||||||
|
|
||||||
baseURL, err := route.URL()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return baseURL.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildCatalogURL constructs a url get a catalog of repositories
|
|
||||||
func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
|
|
||||||
route := ub.cloneRoute(RouteNameCatalog)
|
|
||||||
|
|
||||||
catalogURL, err := route.URL()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return appendValuesURL(catalogURL, values...).String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildTagsURL constructs a url to list the tags in the named repository.
|
|
||||||
func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
|
|
||||||
route := ub.cloneRoute(RouteNameTags)
|
|
||||||
|
|
||||||
tagsURL, err := route.URL("name", name.Name())
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return tagsURL.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildManifestURL constructs a url for the manifest identified by name and
|
|
||||||
// reference. The argument reference may be either a tag or digest.
|
|
||||||
func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
|
|
||||||
route := ub.cloneRoute(RouteNameManifest)
|
|
||||||
|
|
||||||
tagOrDigest := ""
|
|
||||||
switch v := ref.(type) {
|
|
||||||
case reference.Tagged:
|
|
||||||
tagOrDigest = v.Tag()
|
|
||||||
case reference.Digested:
|
|
||||||
tagOrDigest = v.Digest().String()
|
|
||||||
}
|
|
||||||
|
|
||||||
manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return manifestURL.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildBlobURL constructs the url for the blob identified by name and dgst.
|
|
||||||
func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
|
|
||||||
route := ub.cloneRoute(RouteNameBlob)
|
|
||||||
|
|
||||||
layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return layerURL.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildBlobUploadURL constructs a url to begin a blob upload in the
|
|
||||||
// repository identified by name.
|
|
||||||
func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
|
|
||||||
route := ub.cloneRoute(RouteNameBlobUpload)
|
|
||||||
|
|
||||||
uploadURL, err := route.URL("name", name.Name())
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return appendValuesURL(uploadURL, values...).String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
|
|
||||||
// including any url values. This should generally not be used by clients, as
|
|
||||||
// this url is provided by server implementations during the blob upload
|
|
||||||
// process.
|
|
||||||
func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
|
|
||||||
route := ub.cloneRoute(RouteNameBlobUploadChunk)
|
|
||||||
|
|
||||||
uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return appendValuesURL(uploadURL, values...).String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// clondedRoute returns a clone of the named route from the router. Routes
|
|
||||||
// must be cloned to avoid modifying them during url generation.
|
|
||||||
func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
|
|
||||||
route := new(mux.Route)
|
|
||||||
root := new(url.URL)
|
|
||||||
|
|
||||||
*route = *ub.router.GetRoute(name) // clone the route
|
|
||||||
*root = *ub.root
|
|
||||||
|
|
||||||
return clonedRoute{Route: route, root: root, relative: ub.relative}
|
|
||||||
}
|
|
||||||
|
|
||||||
type clonedRoute struct {
|
|
||||||
*mux.Route
|
|
||||||
root *url.URL
|
|
||||||
relative bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
|
|
||||||
routeURL, err := cr.Route.URL(pairs...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if cr.relative {
|
|
||||||
return routeURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
|
|
||||||
routeURL.Path = routeURL.Path[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
url := cr.root.ResolveReference(routeURL)
|
|
||||||
url.Scheme = cr.root.Scheme
|
|
||||||
return url, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendValuesURL appends the parameters to the url.
|
|
||||||
func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
|
|
||||||
merged := u.Query()
|
|
||||||
|
|
||||||
for _, v := range values {
|
|
||||||
for k, vv := range v {
|
|
||||||
merged[k] = append(merged[k], vv...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
u.RawQuery = merged.Encode()
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendValues appends the parameters to the url. Panics if the string is not
|
|
||||||
// a url.
|
|
||||||
func appendValues(u string, values ...url.Values) string {
|
|
||||||
up, err := url.Parse(u)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
panic(err) // should never happen
|
|
||||||
}
|
|
||||||
|
|
||||||
return appendValuesURL(up, values...).String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// isIPv6Address returns true if given string is a valid IPv6 address. No port is allowed. The address may be
|
|
||||||
// enclosed in square brackets.
|
|
||||||
func isIPv6Address(host string) bool {
|
|
||||||
if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
|
|
||||||
host = host[1 : len(host)-1]
|
|
||||||
}
|
|
||||||
// The IPv6 scoped addressing zone identifier starts after the last percent sign.
|
|
||||||
if i := strings.LastIndexByte(host, '%'); i > 0 {
|
|
||||||
host = host[:i]
|
|
||||||
}
|
|
||||||
ip := net.ParseIP(host)
|
|
||||||
if ip == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if ip.To16() == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if ip.To4() == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// dot can be present in ipv4-mapped address, it needs to come after a colon though
|
|
||||||
i := strings.IndexAny(host, ":.")
|
|
||||||
return i >= 0 && host[i] == ':'
|
|
||||||
}
|
|
58
vendor/github.com/docker/distribution/registry/client/auth/api_version.go
generated
vendored
58
vendor/github.com/docker/distribution/registry/client/auth/api_version.go
generated
vendored
|
@ -1,58 +0,0 @@
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// APIVersion represents a version of an API including its
|
|
||||||
// type and version number.
|
|
||||||
type APIVersion struct {
|
|
||||||
// Type refers to the name of a specific API specification
|
|
||||||
// such as "registry"
|
|
||||||
Type string
|
|
||||||
|
|
||||||
// Version is the version of the API specification implemented,
|
|
||||||
// This may omit the revision number and only include
|
|
||||||
// the major and minor version, such as "2.0"
|
|
||||||
Version string
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string formatted API Version
|
|
||||||
func (v APIVersion) String() string {
|
|
||||||
return v.Type + "/" + v.Version
|
|
||||||
}
|
|
||||||
|
|
||||||
// APIVersions gets the API versions out of an HTTP response using the provided
|
|
||||||
// version header as the key for the HTTP header.
|
|
||||||
func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
|
|
||||||
versions := []APIVersion{}
|
|
||||||
if versionHeader != "" {
|
|
||||||
for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
|
|
||||||
for _, version := range strings.Fields(supportedVersions) {
|
|
||||||
versions = append(versions, ParseAPIVersion(version))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return versions
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseAPIVersion parses an API version string into an APIVersion
|
|
||||||
// Format (Expected, not enforced):
|
|
||||||
// API version string = <API type> '/' <API version>
|
|
||||||
// API type = [a-z][a-z0-9]*
|
|
||||||
// API version = [0-9]+(\.[0-9]+)?
|
|
||||||
// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
|
|
||||||
func ParseAPIVersion(versionStr string) APIVersion {
|
|
||||||
idx := strings.IndexRune(versionStr, '/')
|
|
||||||
if idx == -1 {
|
|
||||||
return APIVersion{
|
|
||||||
Type: "unknown",
|
|
||||||
Version: versionStr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return APIVersion{
|
|
||||||
Type: strings.ToLower(versionStr[:idx]),
|
|
||||||
Version: versionStr[idx+1:],
|
|
||||||
}
|
|
||||||
}
|
|
27
vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
generated
vendored
27
vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
generated
vendored
|
@ -1,27 +0,0 @@
|
||||||
package challenge
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FROM: https://golang.org/src/net/http/http.go
|
|
||||||
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
|
|
||||||
// return true if the string includes a port.
|
|
||||||
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
|
|
||||||
|
|
||||||
// FROM: http://golang.org/src/net/http/transport.go
|
|
||||||
var portMap = map[string]string{
|
|
||||||
"http": "80",
|
|
||||||
"https": "443",
|
|
||||||
}
|
|
||||||
|
|
||||||
// canonicalAddr returns url.Host but always with a ":port" suffix
|
|
||||||
// FROM: http://golang.org/src/net/http/transport.go
|
|
||||||
func canonicalAddr(url *url.URL) string {
|
|
||||||
addr := url.Host
|
|
||||||
if !hasPort(addr) {
|
|
||||||
return addr + ":" + portMap[url.Scheme]
|
|
||||||
}
|
|
||||||
return addr
|
|
||||||
}
|
|
237
vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
generated
vendored
237
vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
generated
vendored
|
@ -1,237 +0,0 @@
|
||||||
package challenge
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Challenge carries information from a WWW-Authenticate response header.
|
|
||||||
// See RFC 2617.
|
|
||||||
type Challenge struct {
|
|
||||||
// Scheme is the auth-scheme according to RFC 2617
|
|
||||||
Scheme string
|
|
||||||
|
|
||||||
// Parameters are the auth-params according to RFC 2617
|
|
||||||
Parameters map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Manager manages the challenges for endpoints.
|
|
||||||
// The challenges are pulled out of HTTP responses. Only
|
|
||||||
// responses which expect challenges should be added to
|
|
||||||
// the manager, since a non-unauthorized request will be
|
|
||||||
// viewed as not requiring challenges.
|
|
||||||
type Manager interface {
|
|
||||||
// GetChallenges returns the challenges for the given
|
|
||||||
// endpoint URL.
|
|
||||||
GetChallenges(endpoint url.URL) ([]Challenge, error)
|
|
||||||
|
|
||||||
// AddResponse adds the response to the challenge
|
|
||||||
// manager. The challenges will be parsed out of
|
|
||||||
// the WWW-Authenicate headers and added to the
|
|
||||||
// URL which was produced the response. If the
|
|
||||||
// response was authorized, any challenges for the
|
|
||||||
// endpoint will be cleared.
|
|
||||||
AddResponse(resp *http.Response) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSimpleManager returns an instance of
|
|
||||||
// Manger which only maps endpoints to challenges
|
|
||||||
// based on the responses which have been added the
|
|
||||||
// manager. The simple manager will make no attempt to
|
|
||||||
// perform requests on the endpoints or cache the responses
|
|
||||||
// to a backend.
|
|
||||||
func NewSimpleManager() Manager {
|
|
||||||
return &simpleManager{
|
|
||||||
Challanges: make(map[string][]Challenge),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type simpleManager struct {
|
|
||||||
sync.RWMutex
|
|
||||||
Challanges map[string][]Challenge
|
|
||||||
}
|
|
||||||
|
|
||||||
func normalizeURL(endpoint *url.URL) {
|
|
||||||
endpoint.Host = strings.ToLower(endpoint.Host)
|
|
||||||
endpoint.Host = canonicalAddr(endpoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
|
|
||||||
normalizeURL(&endpoint)
|
|
||||||
|
|
||||||
m.RLock()
|
|
||||||
defer m.RUnlock()
|
|
||||||
challenges := m.Challanges[endpoint.String()]
|
|
||||||
return challenges, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *simpleManager) AddResponse(resp *http.Response) error {
|
|
||||||
challenges := ResponseChallenges(resp)
|
|
||||||
if resp.Request == nil {
|
|
||||||
return fmt.Errorf("missing request reference")
|
|
||||||
}
|
|
||||||
urlCopy := url.URL{
|
|
||||||
Path: resp.Request.URL.Path,
|
|
||||||
Host: resp.Request.URL.Host,
|
|
||||||
Scheme: resp.Request.URL.Scheme,
|
|
||||||
}
|
|
||||||
normalizeURL(&urlCopy)
|
|
||||||
|
|
||||||
m.Lock()
|
|
||||||
defer m.Unlock()
|
|
||||||
m.Challanges[urlCopy.String()] = challenges
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Octet types from RFC 2616.
|
|
||||||
type octetType byte
|
|
||||||
|
|
||||||
var octetTypes [256]octetType
|
|
||||||
|
|
||||||
const (
|
|
||||||
isToken octetType = 1 << iota
|
|
||||||
isSpace
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// OCTET = <any 8-bit sequence of data>
|
|
||||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
|
||||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
|
||||||
// CR = <US-ASCII CR, carriage return (13)>
|
|
||||||
// LF = <US-ASCII LF, linefeed (10)>
|
|
||||||
// SP = <US-ASCII SP, space (32)>
|
|
||||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
|
||||||
// <"> = <US-ASCII double-quote mark (34)>
|
|
||||||
// CRLF = CR LF
|
|
||||||
// LWS = [CRLF] 1*( SP | HT )
|
|
||||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
|
||||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
|
||||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
|
||||||
// token = 1*<any CHAR except CTLs or separators>
|
|
||||||
// qdtext = <any TEXT except <">>
|
|
||||||
|
|
||||||
for c := 0; c < 256; c++ {
|
|
||||||
var t octetType
|
|
||||||
isCtl := c <= 31 || c == 127
|
|
||||||
isChar := 0 <= c && c <= 127
|
|
||||||
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
|
||||||
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
|
||||||
t |= isSpace
|
|
||||||
}
|
|
||||||
if isChar && !isCtl && !isSeparator {
|
|
||||||
t |= isToken
|
|
||||||
}
|
|
||||||
octetTypes[c] = t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResponseChallenges returns a list of authorization challenges
|
|
||||||
// for the given http Response. Challenges are only checked if
|
|
||||||
// the response status code was a 401.
|
|
||||||
func ResponseChallenges(resp *http.Response) []Challenge {
|
|
||||||
if resp.StatusCode == http.StatusUnauthorized {
|
|
||||||
// Parse the WWW-Authenticate Header and store the challenges
|
|
||||||
// on this endpoint object.
|
|
||||||
return parseAuthHeader(resp.Header)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseAuthHeader(header http.Header) []Challenge {
|
|
||||||
challenges := []Challenge{}
|
|
||||||
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
|
|
||||||
v, p := parseValueAndParams(h)
|
|
||||||
if v != "" {
|
|
||||||
challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return challenges
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseValueAndParams(header string) (value string, params map[string]string) {
|
|
||||||
params = make(map[string]string)
|
|
||||||
value, s := expectToken(header)
|
|
||||||
if value == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value = strings.ToLower(value)
|
|
||||||
s = "," + skipSpace(s)
|
|
||||||
for strings.HasPrefix(s, ",") {
|
|
||||||
var pkey string
|
|
||||||
pkey, s = expectToken(skipSpace(s[1:]))
|
|
||||||
if pkey == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !strings.HasPrefix(s, "=") {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var pvalue string
|
|
||||||
pvalue, s = expectTokenOrQuoted(s[1:])
|
|
||||||
if pvalue == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pkey = strings.ToLower(pkey)
|
|
||||||
params[pkey] = pvalue
|
|
||||||
s = skipSpace(s)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func skipSpace(s string) (rest string) {
|
|
||||||
i := 0
|
|
||||||
for ; i < len(s); i++ {
|
|
||||||
if octetTypes[s[i]]&isSpace == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s[i:]
|
|
||||||
}
|
|
||||||
|
|
||||||
func expectToken(s string) (token, rest string) {
|
|
||||||
i := 0
|
|
||||||
for ; i < len(s); i++ {
|
|
||||||
if octetTypes[s[i]]&isToken == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s[:i], s[i:]
|
|
||||||
}
|
|
||||||
|
|
||||||
func expectTokenOrQuoted(s string) (value string, rest string) {
|
|
||||||
if !strings.HasPrefix(s, "\"") {
|
|
||||||
return expectToken(s)
|
|
||||||
}
|
|
||||||
s = s[1:]
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
switch s[i] {
|
|
||||||
case '"':
|
|
||||||
return s[:i], s[i+1:]
|
|
||||||
case '\\':
|
|
||||||
p := make([]byte, len(s)-1)
|
|
||||||
j := copy(p, s[:i])
|
|
||||||
escape := true
|
|
||||||
for i = i + 1; i < len(s); i++ {
|
|
||||||
b := s[i]
|
|
||||||
switch {
|
|
||||||
case escape:
|
|
||||||
escape = false
|
|
||||||
p[j] = b
|
|
||||||
j++
|
|
||||||
case b == '\\':
|
|
||||||
escape = true
|
|
||||||
case b == '"':
|
|
||||||
return string(p[:j]), s[i+1:]
|
|
||||||
default:
|
|
||||||
p[j] = b
|
|
||||||
j++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", ""
|
|
||||||
}
|
|
503
vendor/github.com/docker/distribution/registry/client/auth/session.go
generated
vendored
503
vendor/github.com/docker/distribution/registry/client/auth/session.go
generated
vendored
|
@ -1,503 +0,0 @@
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/distribution/registry/client"
|
|
||||||
"github.com/docker/distribution/registry/client/auth/challenge"
|
|
||||||
"github.com/docker/distribution/registry/client/transport"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
|
|
||||||
// basic auth due to lack of credentials.
|
|
||||||
ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
|
|
||||||
|
|
||||||
// ErrNoToken is returned if a request is successful but the body does not
|
|
||||||
// contain an authorization token.
|
|
||||||
ErrNoToken = errors.New("authorization server did not include a token in the response")
|
|
||||||
)
|
|
||||||
|
|
||||||
const defaultClientID = "registry-client"
|
|
||||||
|
|
||||||
// AuthenticationHandler is an interface for authorizing a request from
|
|
||||||
// params from a "WWW-Authenicate" header for a single scheme.
|
|
||||||
type AuthenticationHandler interface {
|
|
||||||
// Scheme returns the scheme as expected from the "WWW-Authenicate" header.
|
|
||||||
Scheme() string
|
|
||||||
|
|
||||||
// AuthorizeRequest adds the authorization header to a request (if needed)
|
|
||||||
// using the parameters from "WWW-Authenticate" method. The parameters
|
|
||||||
// values depend on the scheme.
|
|
||||||
AuthorizeRequest(req *http.Request, params map[string]string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// CredentialStore is an interface for getting credentials for
|
|
||||||
// a given URL
|
|
||||||
type CredentialStore interface {
|
|
||||||
// Basic returns basic auth for the given URL
|
|
||||||
Basic(*url.URL) (string, string)
|
|
||||||
|
|
||||||
// RefreshToken returns a refresh token for the
|
|
||||||
// given URL and service
|
|
||||||
RefreshToken(*url.URL, string) string
|
|
||||||
|
|
||||||
// SetRefreshToken sets the refresh token if none
|
|
||||||
// is provided for the given url and service
|
|
||||||
SetRefreshToken(realm *url.URL, service, token string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAuthorizer creates an authorizer which can handle multiple authentication
|
|
||||||
// schemes. The handlers are tried in order, the higher priority authentication
|
|
||||||
// methods should be first. The challengeMap holds a list of challenges for
|
|
||||||
// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
|
|
||||||
func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier {
|
|
||||||
return &endpointAuthorizer{
|
|
||||||
challenges: manager,
|
|
||||||
handlers: handlers,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type endpointAuthorizer struct {
|
|
||||||
challenges challenge.Manager
|
|
||||||
handlers []AuthenticationHandler
|
|
||||||
transport http.RoundTripper
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
|
|
||||||
pingPath := req.URL.Path
|
|
||||||
if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 {
|
|
||||||
pingPath = pingPath[:v2Root+4]
|
|
||||||
} else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 {
|
|
||||||
pingPath = pingPath[:v1Root] + "/v2/"
|
|
||||||
} else {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ping := url.URL{
|
|
||||||
Host: req.URL.Host,
|
|
||||||
Scheme: req.URL.Scheme,
|
|
||||||
Path: pingPath,
|
|
||||||
}
|
|
||||||
|
|
||||||
challenges, err := ea.challenges.GetChallenges(ping)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(challenges) > 0 {
|
|
||||||
for _, handler := range ea.handlers {
|
|
||||||
for _, c := range challenges {
|
|
||||||
if c.Scheme != handler.Scheme() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is the minimum duration a token can last (in seconds).
|
|
||||||
// A token must not live less than 60 seconds because older versions
|
|
||||||
// of the Docker client didn't read their expiration from the token
|
|
||||||
// response and assumed 60 seconds. So to remain compatible with
|
|
||||||
// those implementations, a token must live at least this long.
|
|
||||||
const minimumTokenLifetimeSeconds = 60
|
|
||||||
|
|
||||||
// Private interface for time used by this package to enable tests to provide their own implementation.
|
|
||||||
type clock interface {
|
|
||||||
Now() time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type tokenHandler struct {
|
|
||||||
header http.Header
|
|
||||||
creds CredentialStore
|
|
||||||
transport http.RoundTripper
|
|
||||||
clock clock
|
|
||||||
|
|
||||||
offlineAccess bool
|
|
||||||
forceOAuth bool
|
|
||||||
clientID string
|
|
||||||
scopes []Scope
|
|
||||||
|
|
||||||
tokenLock sync.Mutex
|
|
||||||
tokenCache string
|
|
||||||
tokenExpiration time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scope is a type which is serializable to a string
|
|
||||||
// using the allow scope grammar.
|
|
||||||
type Scope interface {
|
|
||||||
String() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// RepositoryScope represents a token scope for access
|
|
||||||
// to a repository.
|
|
||||||
type RepositoryScope struct {
|
|
||||||
Repository string
|
|
||||||
Class string
|
|
||||||
Actions []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string representation of the repository
|
|
||||||
// using the scope grammar
|
|
||||||
func (rs RepositoryScope) String() string {
|
|
||||||
repoType := "repository"
|
|
||||||
if rs.Class != "" {
|
|
||||||
repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegistryScope represents a token scope for access
|
|
||||||
// to resources in the registry.
|
|
||||||
type RegistryScope struct {
|
|
||||||
Name string
|
|
||||||
Actions []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string representation of the user
|
|
||||||
// using the scope grammar
|
|
||||||
func (rs RegistryScope) String() string {
|
|
||||||
return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
// TokenHandlerOptions is used to configure a new token handler
|
|
||||||
type TokenHandlerOptions struct {
|
|
||||||
Transport http.RoundTripper
|
|
||||||
Credentials CredentialStore
|
|
||||||
|
|
||||||
OfflineAccess bool
|
|
||||||
ForceOAuth bool
|
|
||||||
ClientID string
|
|
||||||
Scopes []Scope
|
|
||||||
}
|
|
||||||
|
|
||||||
// An implementation of clock for providing real time data.
|
|
||||||
type realClock struct{}
|
|
||||||
|
|
||||||
// Now implements clock
|
|
||||||
func (realClock) Now() time.Time { return time.Now() }
|
|
||||||
|
|
||||||
// NewTokenHandler creates a new AuthenicationHandler which supports
|
|
||||||
// fetching tokens from a remote token server.
|
|
||||||
func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
|
|
||||||
// Create options...
|
|
||||||
return NewTokenHandlerWithOptions(TokenHandlerOptions{
|
|
||||||
Transport: transport,
|
|
||||||
Credentials: creds,
|
|
||||||
Scopes: []Scope{
|
|
||||||
RepositoryScope{
|
|
||||||
Repository: scope,
|
|
||||||
Actions: actions,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTokenHandlerWithOptions creates a new token handler using the provided
|
|
||||||
// options structure.
|
|
||||||
func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
|
|
||||||
handler := &tokenHandler{
|
|
||||||
transport: options.Transport,
|
|
||||||
creds: options.Credentials,
|
|
||||||
offlineAccess: options.OfflineAccess,
|
|
||||||
forceOAuth: options.ForceOAuth,
|
|
||||||
clientID: options.ClientID,
|
|
||||||
scopes: options.Scopes,
|
|
||||||
clock: realClock{},
|
|
||||||
}
|
|
||||||
|
|
||||||
return handler
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *tokenHandler) client() *http.Client {
|
|
||||||
return &http.Client{
|
|
||||||
Transport: th.transport,
|
|
||||||
Timeout: 15 * time.Second,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *tokenHandler) Scheme() string {
|
|
||||||
return "bearer"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
|
|
||||||
var additionalScopes []string
|
|
||||||
if fromParam := req.URL.Query().Get("from"); fromParam != "" {
|
|
||||||
additionalScopes = append(additionalScopes, RepositoryScope{
|
|
||||||
Repository: fromParam,
|
|
||||||
Actions: []string{"pull"},
|
|
||||||
}.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
token, err := th.getToken(params, additionalScopes...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) {
|
|
||||||
th.tokenLock.Lock()
|
|
||||||
defer th.tokenLock.Unlock()
|
|
||||||
scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
|
|
||||||
for _, scope := range th.scopes {
|
|
||||||
scopes = append(scopes, scope.String())
|
|
||||||
}
|
|
||||||
var addedScopes bool
|
|
||||||
for _, scope := range additionalScopes {
|
|
||||||
scopes = append(scopes, scope)
|
|
||||||
addedScopes = true
|
|
||||||
}
|
|
||||||
|
|
||||||
now := th.clock.Now()
|
|
||||||
if now.After(th.tokenExpiration) || addedScopes {
|
|
||||||
token, expiration, err := th.fetchToken(params, scopes)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// do not update cache for added scope tokens
|
|
||||||
if !addedScopes {
|
|
||||||
th.tokenCache = token
|
|
||||||
th.tokenExpiration = expiration
|
|
||||||
}
|
|
||||||
|
|
||||||
return token, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return th.tokenCache, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type postTokenResponse struct {
|
|
||||||
AccessToken string `json:"access_token"`
|
|
||||||
RefreshToken string `json:"refresh_token"`
|
|
||||||
ExpiresIn int `json:"expires_in"`
|
|
||||||
IssuedAt time.Time `json:"issued_at"`
|
|
||||||
Scope string `json:"scope"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
|
|
||||||
form := url.Values{}
|
|
||||||
form.Set("scope", strings.Join(scopes, " "))
|
|
||||||
form.Set("service", service)
|
|
||||||
|
|
||||||
clientID := th.clientID
|
|
||||||
if clientID == "" {
|
|
||||||
// Use default client, this is a required field
|
|
||||||
clientID = defaultClientID
|
|
||||||
}
|
|
||||||
form.Set("client_id", clientID)
|
|
||||||
|
|
||||||
if refreshToken != "" {
|
|
||||||
form.Set("grant_type", "refresh_token")
|
|
||||||
form.Set("refresh_token", refreshToken)
|
|
||||||
} else if th.creds != nil {
|
|
||||||
form.Set("grant_type", "password")
|
|
||||||
username, password := th.creds.Basic(realm)
|
|
||||||
form.Set("username", username)
|
|
||||||
form.Set("password", password)
|
|
||||||
|
|
||||||
// attempt to get a refresh token
|
|
||||||
form.Set("access_type", "offline")
|
|
||||||
} else {
|
|
||||||
// refuse to do oauth without a grant type
|
|
||||||
return "", time.Time{}, fmt.Errorf("no supported grant type")
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := th.client().PostForm(realm.String(), form)
|
|
||||||
if err != nil {
|
|
||||||
return "", time.Time{}, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if !client.SuccessStatus(resp.StatusCode) {
|
|
||||||
err := client.HandleErrorResponse(resp)
|
|
||||||
return "", time.Time{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
decoder := json.NewDecoder(resp.Body)
|
|
||||||
|
|
||||||
var tr postTokenResponse
|
|
||||||
if err = decoder.Decode(&tr); err != nil {
|
|
||||||
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
|
|
||||||
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
|
|
||||||
// The default/minimum lifetime.
|
|
||||||
tr.ExpiresIn = minimumTokenLifetimeSeconds
|
|
||||||
logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tr.IssuedAt.IsZero() {
|
|
||||||
// issued_at is optional in the token response.
|
|
||||||
tr.IssuedAt = th.clock.Now().UTC()
|
|
||||||
}
|
|
||||||
|
|
||||||
return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type getTokenResponse struct {
|
|
||||||
Token string `json:"token"`
|
|
||||||
AccessToken string `json:"access_token"`
|
|
||||||
ExpiresIn int `json:"expires_in"`
|
|
||||||
IssuedAt time.Time `json:"issued_at"`
|
|
||||||
RefreshToken string `json:"refresh_token"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", realm.String(), nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", time.Time{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
reqParams := req.URL.Query()
|
|
||||||
|
|
||||||
if service != "" {
|
|
||||||
reqParams.Add("service", service)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scope := range scopes {
|
|
||||||
reqParams.Add("scope", scope)
|
|
||||||
}
|
|
||||||
|
|
||||||
if th.offlineAccess {
|
|
||||||
reqParams.Add("offline_token", "true")
|
|
||||||
clientID := th.clientID
|
|
||||||
if clientID == "" {
|
|
||||||
clientID = defaultClientID
|
|
||||||
}
|
|
||||||
reqParams.Add("client_id", clientID)
|
|
||||||
}
|
|
||||||
|
|
||||||
if th.creds != nil {
|
|
||||||
username, password := th.creds.Basic(realm)
|
|
||||||
if username != "" && password != "" {
|
|
||||||
reqParams.Add("account", username)
|
|
||||||
req.SetBasicAuth(username, password)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
req.URL.RawQuery = reqParams.Encode()
|
|
||||||
|
|
||||||
resp, err := th.client().Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return "", time.Time{}, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if !client.SuccessStatus(resp.StatusCode) {
|
|
||||||
err := client.HandleErrorResponse(resp)
|
|
||||||
return "", time.Time{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
decoder := json.NewDecoder(resp.Body)
|
|
||||||
|
|
||||||
var tr getTokenResponse
|
|
||||||
if err = decoder.Decode(&tr); err != nil {
|
|
||||||
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tr.RefreshToken != "" && th.creds != nil {
|
|
||||||
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
// `access_token` is equivalent to `token` and if both are specified
|
|
||||||
// the choice is undefined. Canonicalize `access_token` by sticking
|
|
||||||
// things in `token`.
|
|
||||||
if tr.AccessToken != "" {
|
|
||||||
tr.Token = tr.AccessToken
|
|
||||||
}
|
|
||||||
|
|
||||||
if tr.Token == "" {
|
|
||||||
return "", time.Time{}, ErrNoToken
|
|
||||||
}
|
|
||||||
|
|
||||||
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
|
|
||||||
// The default/minimum lifetime.
|
|
||||||
tr.ExpiresIn = minimumTokenLifetimeSeconds
|
|
||||||
logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tr.IssuedAt.IsZero() {
|
|
||||||
// issued_at is optional in the token response.
|
|
||||||
tr.IssuedAt = th.clock.Now().UTC()
|
|
||||||
}
|
|
||||||
|
|
||||||
return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
|
|
||||||
realm, ok := params["realm"]
|
|
||||||
if !ok {
|
|
||||||
return "", time.Time{}, errors.New("no realm specified for token auth challenge")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(dmcgowan): Handle empty scheme and relative realm
|
|
||||||
realmURL, err := url.Parse(realm)
|
|
||||||
if err != nil {
|
|
||||||
return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
service := params["service"]
|
|
||||||
|
|
||||||
var refreshToken string
|
|
||||||
|
|
||||||
if th.creds != nil {
|
|
||||||
refreshToken = th.creds.RefreshToken(realmURL, service)
|
|
||||||
}
|
|
||||||
|
|
||||||
if refreshToken != "" || th.forceOAuth {
|
|
||||||
return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
|
|
||||||
}
|
|
||||||
|
|
||||||
return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
|
|
||||||
}
|
|
||||||
|
|
||||||
type basicHandler struct {
|
|
||||||
creds CredentialStore
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBasicHandler creaters a new authentiation handler which adds
|
|
||||||
// basic authentication credentials to a request.
|
|
||||||
func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
|
|
||||||
return &basicHandler{
|
|
||||||
creds: creds,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*basicHandler) Scheme() string {
|
|
||||||
return "basic"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
|
|
||||||
if bh.creds != nil {
|
|
||||||
username, password := bh.creds.Basic(req.URL)
|
|
||||||
if username != "" && password != "" {
|
|
||||||
req.SetBasicAuth(username, password)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ErrNoBasicAuthCredentials
|
|
||||||
}
|
|
162
vendor/github.com/docker/distribution/registry/client/blob_writer.go
generated
vendored
162
vendor/github.com/docker/distribution/registry/client/blob_writer.go
generated
vendored
|
@ -1,162 +0,0 @@
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/distribution"
|
|
||||||
"github.com/docker/distribution/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
type httpBlobUpload struct {
|
|
||||||
statter distribution.BlobStatter
|
|
||||||
client *http.Client
|
|
||||||
|
|
||||||
uuid string
|
|
||||||
startedAt time.Time
|
|
||||||
|
|
||||||
location string // always the last value of the location header.
|
|
||||||
offset int64
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
|
|
||||||
panic("Not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
|
|
||||||
if resp.StatusCode == http.StatusNotFound {
|
|
||||||
return distribution.ErrBlobUploadUnknown
|
|
||||||
}
|
|
||||||
return HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
|
|
||||||
req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer req.Body.Close()
|
|
||||||
|
|
||||||
resp, err := hbu.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !SuccessStatus(resp.StatusCode) {
|
|
||||||
return 0, hbu.handleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
|
|
||||||
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
rng := resp.Header.Get("Range")
|
|
||||||
var start, end int64
|
|
||||||
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
|
|
||||||
return 0, err
|
|
||||||
} else if n != 2 || end < start {
|
|
||||||
return 0, fmt.Errorf("bad range format: %s", rng)
|
|
||||||
}
|
|
||||||
|
|
||||||
return (end - start + 1), nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
|
|
||||||
req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
|
|
||||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
|
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
|
||||||
|
|
||||||
resp, err := hbu.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !SuccessStatus(resp.StatusCode) {
|
|
||||||
return 0, hbu.handleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
|
|
||||||
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
rng := resp.Header.Get("Range")
|
|
||||||
var start, end int
|
|
||||||
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
|
|
||||||
return 0, err
|
|
||||||
} else if n != 2 || end < start {
|
|
||||||
return 0, fmt.Errorf("bad range format: %s", rng)
|
|
||||||
}
|
|
||||||
|
|
||||||
return (end - start + 1), nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the number of bytes written to this upload so far, as
// tracked locally in hbu.offset.
func (hbu *httpBlobUpload) Size() int64 {
	return hbu.offset
}
|
|
||||||
|
|
||||||
// ID returns the registry-assigned identifier of this upload session
// (the Docker-Upload-UUID captured from the last response).
func (hbu *httpBlobUpload) ID() string {
	return hbu.uuid
}
|
|
||||||
|
|
||||||
// StartedAt reports when this upload session was started.
func (hbu *httpBlobUpload) StartedAt() time.Time {
	return hbu.startedAt
}
|
|
||||||
|
|
||||||
// Commit completes the blob upload by issuing a PUT to the upload location
// with the expected digest as a query parameter, then stats the blob against
// the registry to return its canonical descriptor.
func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	// TODO(dmcgowan): Check if already finished, if so just fetch
	req, err := http.NewRequest("PUT", hbu.location, nil)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// The registry validates the uploaded content against this digest.
	values := req.URL.Query()
	values.Set("digest", desc.Digest.String())
	req.URL.RawQuery = values.Encode()

	resp, err := hbu.client.Do(req)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer resp.Body.Close()

	if !SuccessStatus(resp.StatusCode) {
		return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
	}

	// Ask the registry for the stored descriptor rather than trusting the
	// caller-supplied one.
	return hbu.statter.Stat(ctx, desc.Digest)
}
|
|
||||||
|
|
||||||
// Cancel aborts the upload session by issuing a DELETE to the upload
// location. A 404 is treated as success, since the upload no longer exists.
func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
	req, err := http.NewRequest("DELETE", hbu.location, nil)
	if err != nil {
		return err
	}
	resp, err := hbu.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
		return nil
	}
	return hbu.handleErrorResponse(resp)
}
|
|
||||||
|
|
||||||
// Close marks the upload as closed locally. No request is sent to the
// registry; use Commit or Cancel to finish the remote upload session.
func (hbu *httpBlobUpload) Close() error {
	hbu.closed = true
	return nil
}
|
|
139
vendor/github.com/docker/distribution/registry/client/errors.go
generated
vendored
139
vendor/github.com/docker/distribution/registry/client/errors.go
generated
vendored
|
@ -1,139 +0,0 @@
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/registry/api/errcode"
|
|
||||||
"github.com/docker/distribution/registry/client/auth/challenge"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
|
|
||||||
// errcode.Errors slice.
|
|
||||||
var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
|
|
||||||
|
|
||||||
// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
|
|
||||||
// returned when making a registry api call.
|
|
||||||
type UnexpectedHTTPStatusError struct {
	// Status is the HTTP status line text of the offending response
	// (e.g. "500 Internal Server Error").
	Status string
}

// Error implements the error interface.
func (e *UnexpectedHTTPStatusError) Error() string {
	return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
}
|
|
||||||
|
|
||||||
// UnexpectedHTTPResponseError is returned when an expected HTTP status code
|
|
||||||
// is returned, but the content was unexpected and failed to be parsed.
|
|
||||||
type UnexpectedHTTPResponseError struct {
	// ParseErr is the error encountered while parsing the response body.
	ParseErr error
	// StatusCode is the HTTP status code of the response.
	StatusCode int
	// Response is the raw, unparsed response body, kept for diagnosis.
	Response []byte
}

// Error implements the error interface, quoting the raw body so empty or
// malformed payloads are visible in logs.
func (e *UnexpectedHTTPResponseError) Error() string {
	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
}
|
|
||||||
|
|
||||||
func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
|
|
||||||
var errors errcode.Errors
|
|
||||||
body, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// For backward compatibility, handle irregularly formatted
|
|
||||||
// messages that contain a "details" field.
|
|
||||||
var detailsErr struct {
|
|
||||||
Details string `json:"details"`
|
|
||||||
}
|
|
||||||
err = json.Unmarshal(body, &detailsErr)
|
|
||||||
if err == nil && detailsErr.Details != "" {
|
|
||||||
switch statusCode {
|
|
||||||
case http.StatusUnauthorized:
|
|
||||||
return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
|
|
||||||
case http.StatusTooManyRequests:
|
|
||||||
return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
|
|
||||||
default:
|
|
||||||
return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.Unmarshal(body, &errors); err != nil {
|
|
||||||
return &UnexpectedHTTPResponseError{
|
|
||||||
ParseErr: err,
|
|
||||||
StatusCode: statusCode,
|
|
||||||
Response: body,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errors) == 0 {
|
|
||||||
// If there was no error specified in the body, return
|
|
||||||
// UnexpectedHTTPResponseError.
|
|
||||||
return &UnexpectedHTTPResponseError{
|
|
||||||
ParseErr: ErrNoErrorsInBody,
|
|
||||||
StatusCode: statusCode,
|
|
||||||
Response: body,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeErrorList(err error) []error {
|
|
||||||
if errL, ok := err.(errcode.Errors); ok {
|
|
||||||
return []error(errL)
|
|
||||||
}
|
|
||||||
return []error{err}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mergeErrors concatenates the flattened components of err1 and err2 into a
// single errcode.Errors value.
func mergeErrors(err1, err2 error) error {
	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}
|
|
||||||
|
|
||||||
// HandleErrorResponse returns error parsed from HTTP response for an
|
|
||||||
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
|
|
||||||
// UnexpectedHTTPStatusError returned for response code outside of expected
|
|
||||||
// range.
|
|
||||||
// HandleErrorResponse returns error parsed from HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError returned for response code outside of expected
// range.
func HandleErrorResponse(resp *http.Response) error {
	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
		// Check for OAuth errors within the `WWW-Authenticate` header first
		// See https://tools.ietf.org/html/rfc6750#section-3
		for _, c := range challenge.ResponseChallenges(resp) {
			if c.Scheme == "bearer" {
				var err errcode.Error
				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
				switch c.Parameters["error"] {
				case "invalid_token":
					err.Code = errcode.ErrorCodeUnauthorized
				case "insufficient_scope":
					err.Code = errcode.ErrorCodeDenied
				default:
					// Not an RFC 6750 error code we recognize; fall back to
					// body parsing below.
					continue
				}
				if description := c.Parameters["error_description"]; description != "" {
					err.Message = description
				} else {
					err.Message = err.Code.Message()
				}

				// Combine the header-derived error with whatever the body
				// reports.
				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
			}
		}
		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
		// A 401 with an unparseable body is normalized to an Unauthorized
		// error carrying the raw response for diagnosis.
		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
		}
		return err
	}
	return &UnexpectedHTTPStatusError{Status: resp.Status}
}
|
|
||||||
|
|
||||||
// SuccessStatus returns true if the argument is a successful HTTP response
|
|
||||||
// code (in the range 200 - 399 inclusive).
|
|
||||||
// SuccessStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func SuccessStatus(status int) bool {
	switch {
	case status < 200:
		return false
	case status > 399:
		return false
	default:
		return true
	}
}
|
|
853
vendor/github.com/docker/distribution/registry/client/repository.go
generated
vendored
853
vendor/github.com/docker/distribution/registry/client/repository.go
generated
vendored
|
@ -1,853 +0,0 @@
|
||||||
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/distribution"
|
|
||||||
"github.com/docker/distribution/context"
|
|
||||||
"github.com/docker/distribution/digest"
|
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
"github.com/docker/distribution/registry/api/v2"
|
|
||||||
"github.com/docker/distribution/registry/client/transport"
|
|
||||||
"github.com/docker/distribution/registry/storage/cache"
|
|
||||||
"github.com/docker/distribution/registry/storage/cache/memory"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
|
|
||||||
type Registry interface {
|
|
||||||
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkHTTPRedirect is a callback that can manipulate redirected HTTP
|
|
||||||
// requests. It is used to preserve Accept and Range headers.
|
|
||||||
func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
|
|
||||||
if len(via) >= 10 {
|
|
||||||
return errors.New("stopped after 10 redirects")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(via) > 0 {
|
|
||||||
for headerName, headerVals := range via[0].Header {
|
|
||||||
if headerName != "Accept" && headerName != "Range" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, val := range headerVals {
|
|
||||||
// Don't add to redirected request if redirected
|
|
||||||
// request already has a header with the same
|
|
||||||
// name and value.
|
|
||||||
hasValue := false
|
|
||||||
for _, existingVal := range req.Header[headerName] {
|
|
||||||
if existingVal == val {
|
|
||||||
hasValue = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !hasValue {
|
|
||||||
req.Header.Add(headerName, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
|
|
||||||
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
|
|
||||||
ub, err := v2.NewURLBuilderFromString(baseURL, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
client := &http.Client{
|
|
||||||
Transport: transport,
|
|
||||||
Timeout: 1 * time.Minute,
|
|
||||||
CheckRedirect: checkHTTPRedirect,
|
|
||||||
}
|
|
||||||
|
|
||||||
return ®istry{
|
|
||||||
client: client,
|
|
||||||
ub: ub,
|
|
||||||
context: ctx,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type registry struct {
|
|
||||||
client *http.Client
|
|
||||||
ub *v2.URLBuilder
|
|
||||||
context context.Context
|
|
||||||
}
|
|
||||||
|
|
||||||
// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
|
|
||||||
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
|
|
||||||
// are no more entries
|
|
||||||
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
|
|
||||||
var numFilled int
|
|
||||||
var returnErr error
|
|
||||||
|
|
||||||
values := buildCatalogValues(len(entries), last)
|
|
||||||
u, err := r.ub.BuildCatalogURL(values)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := r.client.Get(u)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if SuccessStatus(resp.StatusCode) {
|
|
||||||
var ctlg struct {
|
|
||||||
Repositories []string `json:"repositories"`
|
|
||||||
}
|
|
||||||
decoder := json.NewDecoder(resp.Body)
|
|
||||||
|
|
||||||
if err := decoder.Decode(&ctlg); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for cnt := range ctlg.Repositories {
|
|
||||||
entries[cnt] = ctlg.Repositories[cnt]
|
|
||||||
}
|
|
||||||
numFilled = len(ctlg.Repositories)
|
|
||||||
|
|
||||||
link := resp.Header.Get("Link")
|
|
||||||
if link == "" {
|
|
||||||
returnErr = io.EOF
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return 0, HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
return numFilled, returnErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRepository creates a new Repository for the given repository name and base URL.
|
|
||||||
func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
|
|
||||||
ub, err := v2.NewURLBuilderFromString(baseURL, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
client := &http.Client{
|
|
||||||
Transport: transport,
|
|
||||||
CheckRedirect: checkHTTPRedirect,
|
|
||||||
// TODO(dmcgowan): create cookie jar
|
|
||||||
}
|
|
||||||
|
|
||||||
return &repository{
|
|
||||||
client: client,
|
|
||||||
ub: ub,
|
|
||||||
name: name,
|
|
||||||
context: ctx,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type repository struct {
|
|
||||||
client *http.Client
|
|
||||||
ub *v2.URLBuilder
|
|
||||||
context context.Context
|
|
||||||
name reference.Named
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *repository) Named() reference.Named {
|
|
||||||
return r.name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
|
|
||||||
statter := &blobStatter{
|
|
||||||
name: r.name,
|
|
||||||
ub: r.ub,
|
|
||||||
client: r.client,
|
|
||||||
}
|
|
||||||
return &blobs{
|
|
||||||
name: r.name,
|
|
||||||
ub: r.ub,
|
|
||||||
client: r.client,
|
|
||||||
statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
|
|
||||||
// todo(richardscothern): options should be sent over the wire
|
|
||||||
return &manifests{
|
|
||||||
name: r.name,
|
|
||||||
ub: r.ub,
|
|
||||||
client: r.client,
|
|
||||||
etags: make(map[string]string),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *repository) Tags(ctx context.Context) distribution.TagService {
|
|
||||||
return &tags{
|
|
||||||
client: r.client,
|
|
||||||
ub: r.ub,
|
|
||||||
context: r.context,
|
|
||||||
name: r.Named(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// tags implements remote tagging operations.
|
|
||||||
type tags struct {
|
|
||||||
client *http.Client
|
|
||||||
ub *v2.URLBuilder
|
|
||||||
context context.Context
|
|
||||||
name reference.Named
|
|
||||||
}
|
|
||||||
|
|
||||||
// All returns all tags
|
|
||||||
func (t *tags) All(ctx context.Context) ([]string, error) {
|
|
||||||
var tags []string
|
|
||||||
|
|
||||||
u, err := t.ub.BuildTagsURL(t.name)
|
|
||||||
if err != nil {
|
|
||||||
return tags, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
resp, err := t.client.Get(u)
|
|
||||||
if err != nil {
|
|
||||||
return tags, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if SuccessStatus(resp.StatusCode) {
|
|
||||||
b, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return tags, err
|
|
||||||
}
|
|
||||||
|
|
||||||
tagsResponse := struct {
|
|
||||||
Tags []string `json:"tags"`
|
|
||||||
}{}
|
|
||||||
if err := json.Unmarshal(b, &tagsResponse); err != nil {
|
|
||||||
return tags, err
|
|
||||||
}
|
|
||||||
tags = append(tags, tagsResponse.Tags...)
|
|
||||||
if link := resp.Header.Get("Link"); link != "" {
|
|
||||||
u = strings.Trim(strings.Split(link, ";")[0], "<>")
|
|
||||||
} else {
|
|
||||||
return tags, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return tags, HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// descriptorFromResponse builds a distribution.Descriptor from a manifest
// HEAD/GET response: media type from Content-Type, digest from
// Docker-Content-Digest (or, when that header is absent, by unmarshalling
// the body and taking the canonical digest), and size from Content-Length.
func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
	desc := distribution.Descriptor{}
	headers := response.Header

	ctHeader := headers.Get("Content-Type")
	if ctHeader == "" {
		return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
	}
	desc.MediaType = ctHeader

	digestHeader := headers.Get("Docker-Content-Digest")
	if digestHeader == "" {
		// No digest header: compute the descriptor from the payload itself.
		// This only works for responses that carry the manifest body.
		bytes, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		return desc, nil
	}

	dgst, err := digest.ParseDigest(digestHeader)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	desc.Digest = dgst

	lengthHeader := headers.Get("Content-Length")
	if lengthHeader == "" {
		return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
	}
	length, err := strconv.ParseInt(lengthHeader, 10, 64)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	desc.Size = length

	return desc, nil

}
|
|
||||||
|
|
||||||
// Get issues a HEAD request for a Manifest against its named endpoint in order
|
|
||||||
// to construct a descriptor for the tag. If the registry doesn't support HEADing
|
|
||||||
// a manifest, fallback to GET.
|
|
||||||
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
|
|
||||||
ref, err := reference.WithTag(t.name, tag)
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
u, err := t.ub.BuildManifestURL(ref)
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
newRequest := func(method string) (*http.Response, error) {
|
|
||||||
req, err := http.NewRequest(method, u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, t := range distribution.ManifestMediaTypes() {
|
|
||||||
req.Header.Add("Accept", t)
|
|
||||||
}
|
|
||||||
resp, err := t.client.Do(req)
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := newRequest("HEAD")
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case resp.StatusCode >= 200 && resp.StatusCode < 400:
|
|
||||||
return descriptorFromResponse(resp)
|
|
||||||
default:
|
|
||||||
// if the response is an error - there will be no body to decode.
|
|
||||||
// Issue a GET request:
|
|
||||||
// - for data from a server that does not handle HEAD
|
|
||||||
// - to get error details in case of a failure
|
|
||||||
resp, err = newRequest("GET")
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode >= 200 && resp.StatusCode < 400 {
|
|
||||||
return descriptorFromResponse(resp)
|
|
||||||
}
|
|
||||||
return distribution.Descriptor{}, HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *tags) Untag(ctx context.Context, tag string) error {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
type manifests struct {
|
|
||||||
name reference.Named
|
|
||||||
ub *v2.URLBuilder
|
|
||||||
client *http.Client
|
|
||||||
etags map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
|
|
||||||
ref, err := reference.WithDigest(ms.name, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
u, err := ms.ub.BuildManifestURL(ref)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := ms.client.Head(u)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if SuccessStatus(resp.StatusCode) {
|
|
||||||
return true, nil
|
|
||||||
} else if resp.StatusCode == http.StatusNotFound {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return false, HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddEtagToTag allows a client to supply an eTag to Get which will be
|
|
||||||
// used for a conditional HTTP request. If the eTag matches, a nil manifest
|
|
||||||
// and ErrManifestNotModified error will be returned. etag is automatically
|
|
||||||
// quoted when added to this map.
|
|
||||||
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
|
|
||||||
return etagOption{tag, etag}
|
|
||||||
}
|
|
||||||
|
|
||||||
type etagOption struct{ tag, etag string }
|
|
||||||
|
|
||||||
func (o etagOption) Apply(ms distribution.ManifestService) error {
|
|
||||||
if ms, ok := ms.(*manifests); ok {
|
|
||||||
ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("etag options is a client-only option")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReturnContentDigest allows a client to set a the content digest on
|
|
||||||
// a successful request from the 'Docker-Content-Digest' header. This
|
|
||||||
// returned digest is represents the digest which the registry uses
|
|
||||||
// to refer to the content and can be used to delete the content.
|
|
||||||
func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
|
|
||||||
return contentDigestOption{dgst}
|
|
||||||
}
|
|
||||||
|
|
||||||
type contentDigestOption struct{ digest *digest.Digest }
|
|
||||||
|
|
||||||
func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get fetches a manifest by digest, or by tag when a WithTagOption is
// supplied. It advertises every known manifest media type via Accept, sends
// a conditional If-None-Match when an ETag is registered for the target, and
// returns distribution.ErrManifestNotModified on a 304. When a
// contentDigestOption is supplied, the registry's Docker-Content-Digest is
// written back through it on success.
func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
	var (
		digestOrTag string
		ref         reference.Named
		err         error
		contentDgst *digest.Digest
	)

	// Dispatch options: tag selection and content-digest capture are
	// recognized inline; everything else applies itself to the service.
	for _, option := range options {
		if opt, ok := option.(distribution.WithTagOption); ok {
			digestOrTag = opt.Tag
			ref, err = reference.WithTag(ms.name, opt.Tag)
			if err != nil {
				return nil, err
			}
		} else if opt, ok := option.(contentDigestOption); ok {
			contentDgst = opt.digest
		} else {
			err := option.Apply(ms)
			if err != nil {
				return nil, err
			}
		}
	}

	// No tag option given: address the manifest by its digest.
	if digestOrTag == "" {
		digestOrTag = dgst.String()
		ref, err = reference.WithDigest(ms.name, dgst)
		if err != nil {
			return nil, err
		}
	}

	u, err := ms.ub.BuildManifestURL(ref)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}

	for _, t := range distribution.ManifestMediaTypes() {
		req.Header.Add("Accept", t)
	}

	// Conditional request when we hold an ETag for this digest/tag.
	if _, ok := ms.etags[digestOrTag]; ok {
		req.Header.Set("If-None-Match", ms.etags[digestOrTag])
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotModified {
		return nil, distribution.ErrManifestNotModified
	} else if SuccessStatus(resp.StatusCode) {
		if contentDgst != nil {
			// Best effort: an unparseable digest header is ignored.
			dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest"))
			if err == nil {
				*contentDgst = dgst
			}
		}
		mt := resp.Header.Get("Content-Type")
		body, err := ioutil.ReadAll(resp.Body)

		if err != nil {
			return nil, err
		}
		m, _, err := distribution.UnmarshalManifest(mt, body)
		if err != nil {
			return nil, err
		}
		return m, nil
	}
	return nil, HandleErrorResponse(resp)
}
|
|
||||||
|
|
||||||
// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the
|
|
||||||
// tag name in order to build the correct upload URL.
|
|
||||||
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
|
|
||||||
ref := ms.name
|
|
||||||
var tagged bool
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
if opt, ok := option.(distribution.WithTagOption); ok {
|
|
||||||
var err error
|
|
||||||
ref, err = reference.WithTag(ref, opt.Tag)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
tagged = true
|
|
||||||
} else {
|
|
||||||
err := option.Apply(ms)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mediaType, p, err := m.Payload()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !tagged {
|
|
||||||
// generate a canonical digest and Put by digest
|
|
||||||
_, d, err := distribution.UnmarshalManifest(mediaType, p)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
ref, err = reference.WithDigest(ref, d.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
manifestURL, err := ms.ub.BuildManifestURL(ref)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
putRequest.Header.Set("Content-Type", mediaType)
|
|
||||||
|
|
||||||
resp, err := ms.client.Do(putRequest)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if SuccessStatus(resp.StatusCode) {
|
|
||||||
dgstHeader := resp.Header.Get("Docker-Content-Digest")
|
|
||||||
dgst, err := digest.ParseDigest(dgstHeader)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return dgst, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
ref, err := reference.WithDigest(ms.name, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
u, err := ms.ub.BuildManifestURL(ref)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("DELETE", u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := ms.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if SuccessStatus(resp.StatusCode) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// todo(richardscothern): Restore interface and implementation with merge of #1050
|
|
||||||
/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
|
|
||||||
panic("not supported")
|
|
||||||
}*/
|
|
||||||
|
|
||||||
type blobs struct {
|
|
||||||
name reference.Named
|
|
||||||
ub *v2.URLBuilder
|
|
||||||
client *http.Client
|
|
||||||
|
|
||||||
statter distribution.BlobDescriptorService
|
|
||||||
distribution.BlobDeleter
|
|
||||||
}
|
|
||||||
|
|
||||||
// sanitizeLocation resolves a (possibly relative) Location header value
// against the URL it was received from, returning an absolute URL string.
func sanitizeLocation(location, base string) (string, error) {
	b, err := url.Parse(base)
	if err != nil {
		return "", err
	}

	loc, err := url.Parse(location)
	if err != nil {
		return "", err
	}

	resolved := b.ResolveReference(loc)
	return resolved.String(), nil
}
|
|
||||||
|
|
||||||
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
||||||
return bs.statter.Stat(ctx, dgst)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
|
|
||||||
reader, err := bs.Open(ctx, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer reader.Close()
|
|
||||||
|
|
||||||
return ioutil.ReadAll(reader)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
|
|
||||||
ref, err := reference.WithDigest(bs.name, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
blobURL, err := bs.ub.BuildBlobURL(ref)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return transport.NewHTTPReadSeeker(bs.client, blobURL,
|
|
||||||
func(resp *http.Response) error {
|
|
||||||
if resp.StatusCode == http.StatusNotFound {
|
|
||||||
return distribution.ErrBlobUnknown
|
|
||||||
}
|
|
||||||
return HandleErrorResponse(resp)
|
|
||||||
}), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put uploads the in-memory blob p with the given media type. It opens an
// upload session, streams p through a canonical digester so the digest is
// computed on the fly, and commits the upload with the resulting descriptor.
func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	writer, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	dgstr := digest.Canonical.New()
	// TeeReader feeds the digester while the bytes are copied to the writer.
	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
	if err != nil {
		return distribution.Descriptor{}, err
	}
	if n < int64(len(p)) {
		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
	}

	desc := distribution.Descriptor{
		MediaType: mediaType,
		Size:      int64(len(p)),
		Digest:    dgstr.Digest(),
	}

	return writer.Commit(ctx, desc)
}
|
|
||||||
|
|
||||||
type optionFunc func(interface{}) error
|
|
||||||
|
|
||||||
func (f optionFunc) Apply(v interface{}) error {
|
|
||||||
return f(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMountFrom returns a BlobCreateOption which designates that the blob should be
|
|
||||||
// mounted from the given canonical reference.
|
|
||||||
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
|
|
||||||
return optionFunc(func(v interface{}) error {
|
|
||||||
opts, ok := v.(*distribution.CreateOptions)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("unexpected options type: %T", v)
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Mount.ShouldMount = true
|
|
||||||
opts.Mount.From = ref
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
|
|
||||||
var opts distribution.CreateOptions
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
err := option.Apply(&opts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var values []url.Values
|
|
||||||
|
|
||||||
if opts.Mount.ShouldMount {
|
|
||||||
values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
|
|
||||||
}
|
|
||||||
|
|
||||||
u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := bs.client.Post(u, "", nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
switch resp.StatusCode {
|
|
||||||
case http.StatusCreated:
|
|
||||||
desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
|
|
||||||
case http.StatusAccepted:
|
|
||||||
// TODO(dmcgowan): Check for invalid UUID
|
|
||||||
uuid := resp.Header.Get("Docker-Upload-UUID")
|
|
||||||
location, err := sanitizeLocation(resp.Header.Get("Location"), u)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &httpBlobUpload{
|
|
||||||
statter: bs.statter,
|
|
||||||
client: bs.client,
|
|
||||||
uuid: uuid,
|
|
||||||
startedAt: time.Now(),
|
|
||||||
location: location,
|
|
||||||
}, nil
|
|
||||||
default:
|
|
||||||
return nil, HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
return bs.statter.Clear(ctx, dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
type blobStatter struct {
|
|
||||||
name reference.Named
|
|
||||||
ub *v2.URLBuilder
|
|
||||||
client *http.Client
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
||||||
ref, err := reference.WithDigest(bs.name, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
u, err := bs.ub.BuildBlobURL(ref)
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := bs.client.Head(u)
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if SuccessStatus(resp.StatusCode) {
|
|
||||||
lengthHeader := resp.Header.Get("Content-Length")
|
|
||||||
if lengthHeader == "" {
|
|
||||||
return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
|
|
||||||
}
|
|
||||||
|
|
||||||
length, err := strconv.ParseInt(lengthHeader, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return distribution.Descriptor{
|
|
||||||
MediaType: resp.Header.Get("Content-Type"),
|
|
||||||
Size: length,
|
|
||||||
Digest: dgst,
|
|
||||||
}, nil
|
|
||||||
} else if resp.StatusCode == http.StatusNotFound {
|
|
||||||
return distribution.Descriptor{}, distribution.ErrBlobUnknown
|
|
||||||
}
|
|
||||||
return distribution.Descriptor{}, HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCatalogValues(maxEntries int, last string) url.Values {
|
|
||||||
values := url.Values{}
|
|
||||||
|
|
||||||
if maxEntries > 0 {
|
|
||||||
values.Add("n", strconv.Itoa(maxEntries))
|
|
||||||
}
|
|
||||||
|
|
||||||
if last != "" {
|
|
||||||
values.Add("last", last)
|
|
||||||
}
|
|
||||||
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
ref, err := reference.WithDigest(bs.name, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
blobURL, err := bs.ub.BuildBlobURL(ref)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("DELETE", blobURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := bs.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if SuccessStatus(resp.StatusCode) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return HandleErrorResponse(resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
251
vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
generated
vendored
251
vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
generated
vendored
|
@ -1,251 +0,0 @@
|
||||||
package transport
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`)
|
|
||||||
|
|
||||||
// ErrWrongCodeForByteRange is returned if the client sends a request
|
|
||||||
// with a Range header but the server returns a 2xx or 3xx code other
|
|
||||||
// than 206 Partial Content.
|
|
||||||
ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
|
|
||||||
)
|
|
||||||
|
|
||||||
// ReadSeekCloser combines io.ReadSeeker with io.Closer.
|
|
||||||
type ReadSeekCloser interface {
|
|
||||||
io.ReadSeeker
|
|
||||||
io.Closer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
|
|
||||||
// request. When seeking and starting a read from a non-zero offset
|
|
||||||
// the a "Range" header will be added which sets the offset.
|
|
||||||
// TODO(dmcgowan): Move this into a separate utility package
|
|
||||||
func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
|
|
||||||
return &httpReadSeeker{
|
|
||||||
client: client,
|
|
||||||
url: url,
|
|
||||||
errorHandler: errorHandler,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type httpReadSeeker struct {
|
|
||||||
client *http.Client
|
|
||||||
url string
|
|
||||||
|
|
||||||
// errorHandler creates an error from an unsuccessful HTTP response.
|
|
||||||
// This allows the error to be created with the HTTP response body
|
|
||||||
// without leaking the body through a returned error.
|
|
||||||
errorHandler func(*http.Response) error
|
|
||||||
|
|
||||||
size int64
|
|
||||||
|
|
||||||
// rc is the remote read closer.
|
|
||||||
rc io.ReadCloser
|
|
||||||
// readerOffset tracks the offset as of the last read.
|
|
||||||
readerOffset int64
|
|
||||||
// seekOffset allows Seek to override the offset. Seek changes
|
|
||||||
// seekOffset instead of changing readOffset directly so that
|
|
||||||
// connection resets can be delayed and possibly avoided if the
|
|
||||||
// seek is undone (i.e. seeking to the end and then back to the
|
|
||||||
// beginning).
|
|
||||||
seekOffset int64
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
|
|
||||||
if hrs.err != nil {
|
|
||||||
return 0, hrs.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we sought to a different position, we need to reset the
|
|
||||||
// connection. This logic is here instead of Seek so that if
|
|
||||||
// a seek is undone before the next read, the connection doesn't
|
|
||||||
// need to be closed and reopened. A common example of this is
|
|
||||||
// seeking to the end to determine the length, and then seeking
|
|
||||||
// back to the original position.
|
|
||||||
if hrs.readerOffset != hrs.seekOffset {
|
|
||||||
hrs.reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
hrs.readerOffset = hrs.seekOffset
|
|
||||||
|
|
||||||
rd, err := hrs.reader()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err = rd.Read(p)
|
|
||||||
hrs.seekOffset += int64(n)
|
|
||||||
hrs.readerOffset += int64(n)
|
|
||||||
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
if hrs.err != nil {
|
|
||||||
return 0, hrs.err
|
|
||||||
}
|
|
||||||
|
|
||||||
lastReaderOffset := hrs.readerOffset
|
|
||||||
|
|
||||||
if whence == os.SEEK_SET && hrs.rc == nil {
|
|
||||||
// If no request has been made yet, and we are seeking to an
|
|
||||||
// absolute position, set the read offset as well to avoid an
|
|
||||||
// unnecessary request.
|
|
||||||
hrs.readerOffset = offset
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := hrs.reader()
|
|
||||||
if err != nil {
|
|
||||||
hrs.readerOffset = lastReaderOffset
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
newOffset := hrs.seekOffset
|
|
||||||
|
|
||||||
switch whence {
|
|
||||||
case os.SEEK_CUR:
|
|
||||||
newOffset += offset
|
|
||||||
case os.SEEK_END:
|
|
||||||
if hrs.size < 0 {
|
|
||||||
return 0, errors.New("content length not known")
|
|
||||||
}
|
|
||||||
newOffset = hrs.size + offset
|
|
||||||
case os.SEEK_SET:
|
|
||||||
newOffset = offset
|
|
||||||
}
|
|
||||||
|
|
||||||
if newOffset < 0 {
|
|
||||||
err = errors.New("cannot seek to negative position")
|
|
||||||
} else {
|
|
||||||
hrs.seekOffset = newOffset
|
|
||||||
}
|
|
||||||
|
|
||||||
return hrs.seekOffset, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hrs *httpReadSeeker) Close() error {
|
|
||||||
if hrs.err != nil {
|
|
||||||
return hrs.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// close and release reader chain
|
|
||||||
if hrs.rc != nil {
|
|
||||||
hrs.rc.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
hrs.rc = nil
|
|
||||||
|
|
||||||
hrs.err = errors.New("httpLayer: closed")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hrs *httpReadSeeker) reset() {
|
|
||||||
if hrs.err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if hrs.rc != nil {
|
|
||||||
hrs.rc.Close()
|
|
||||||
hrs.rc = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hrs *httpReadSeeker) reader() (io.Reader, error) {
|
|
||||||
if hrs.err != nil {
|
|
||||||
return nil, hrs.err
|
|
||||||
}
|
|
||||||
|
|
||||||
if hrs.rc != nil {
|
|
||||||
return hrs.rc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", hrs.url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if hrs.readerOffset > 0 {
|
|
||||||
// If we are at different offset, issue a range request from there.
|
|
||||||
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
|
|
||||||
// TODO: get context in here
|
|
||||||
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
|
|
||||||
}
|
|
||||||
|
|
||||||
req.Header.Add("Accept-Encoding", "identity")
|
|
||||||
resp, err := hrs.client.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Normally would use client.SuccessStatus, but that would be a cyclic
|
|
||||||
// import
|
|
||||||
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
|
|
||||||
if hrs.readerOffset > 0 {
|
|
||||||
if resp.StatusCode != http.StatusPartialContent {
|
|
||||||
return nil, ErrWrongCodeForByteRange
|
|
||||||
}
|
|
||||||
|
|
||||||
contentRange := resp.Header.Get("Content-Range")
|
|
||||||
if contentRange == "" {
|
|
||||||
return nil, errors.New("no Content-Range header found in HTTP 206 response")
|
|
||||||
}
|
|
||||||
|
|
||||||
submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
|
|
||||||
if len(submatches) < 4 {
|
|
||||||
return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
startByte, err := strconv.ParseUint(submatches[1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
if startByte != uint64(hrs.readerOffset) {
|
|
||||||
return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
|
|
||||||
}
|
|
||||||
|
|
||||||
endByte, err := strconv.ParseUint(submatches[2], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
if submatches[3] == "*" {
|
|
||||||
hrs.size = -1
|
|
||||||
} else {
|
|
||||||
size, err := strconv.ParseUint(submatches[3], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
if endByte+1 != size {
|
|
||||||
return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
hrs.size = int64(size)
|
|
||||||
}
|
|
||||||
} else if resp.StatusCode == http.StatusOK {
|
|
||||||
hrs.size = resp.ContentLength
|
|
||||||
} else {
|
|
||||||
hrs.size = -1
|
|
||||||
}
|
|
||||||
hrs.rc = resp.Body
|
|
||||||
} else {
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if hrs.errorHandler != nil {
|
|
||||||
return nil, hrs.errorHandler(resp)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
return hrs.rc, nil
|
|
||||||
}
|
|
147
vendor/github.com/docker/distribution/registry/client/transport/transport.go
generated
vendored
147
vendor/github.com/docker/distribution/registry/client/transport/transport.go
generated
vendored
|
@ -1,147 +0,0 @@
|
||||||
package transport
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RequestModifier represents an object which will do an inplace
|
|
||||||
// modification of an HTTP request.
|
|
||||||
type RequestModifier interface {
|
|
||||||
ModifyRequest(*http.Request) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type headerModifier http.Header
|
|
||||||
|
|
||||||
// NewHeaderRequestModifier returns a new RequestModifier which will
|
|
||||||
// add the given headers to a request.
|
|
||||||
func NewHeaderRequestModifier(header http.Header) RequestModifier {
|
|
||||||
return headerModifier(header)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h headerModifier) ModifyRequest(req *http.Request) error {
|
|
||||||
for k, s := range http.Header(h) {
|
|
||||||
req.Header[k] = append(req.Header[k], s...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTransport creates a new transport which will apply modifiers to
|
|
||||||
// the request on a RoundTrip call.
|
|
||||||
func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
|
|
||||||
return &transport{
|
|
||||||
Modifiers: modifiers,
|
|
||||||
Base: base,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// transport is an http.RoundTripper that makes HTTP requests after
|
|
||||||
// copying and modifying the request
|
|
||||||
type transport struct {
|
|
||||||
Modifiers []RequestModifier
|
|
||||||
Base http.RoundTripper
|
|
||||||
|
|
||||||
mu sync.Mutex // guards modReq
|
|
||||||
modReq map[*http.Request]*http.Request // original -> modified
|
|
||||||
}
|
|
||||||
|
|
||||||
// RoundTrip authorizes and authenticates the request with an
|
|
||||||
// access token. If no token exists or token is expired,
|
|
||||||
// tries to refresh/fetch a new token.
|
|
||||||
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
|
||||||
req2 := cloneRequest(req)
|
|
||||||
for _, modifier := range t.Modifiers {
|
|
||||||
if err := modifier.ModifyRequest(req2); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
t.setModReq(req, req2)
|
|
||||||
res, err := t.base().RoundTrip(req2)
|
|
||||||
if err != nil {
|
|
||||||
t.setModReq(req, nil)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res.Body = &onEOFReader{
|
|
||||||
rc: res.Body,
|
|
||||||
fn: func() { t.setModReq(req, nil) },
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CancelRequest cancels an in-flight request by closing its connection.
|
|
||||||
func (t *transport) CancelRequest(req *http.Request) {
|
|
||||||
type canceler interface {
|
|
||||||
CancelRequest(*http.Request)
|
|
||||||
}
|
|
||||||
if cr, ok := t.base().(canceler); ok {
|
|
||||||
t.mu.Lock()
|
|
||||||
modReq := t.modReq[req]
|
|
||||||
delete(t.modReq, req)
|
|
||||||
t.mu.Unlock()
|
|
||||||
cr.CancelRequest(modReq)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *transport) base() http.RoundTripper {
|
|
||||||
if t.Base != nil {
|
|
||||||
return t.Base
|
|
||||||
}
|
|
||||||
return http.DefaultTransport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *transport) setModReq(orig, mod *http.Request) {
|
|
||||||
t.mu.Lock()
|
|
||||||
defer t.mu.Unlock()
|
|
||||||
if t.modReq == nil {
|
|
||||||
t.modReq = make(map[*http.Request]*http.Request)
|
|
||||||
}
|
|
||||||
if mod == nil {
|
|
||||||
delete(t.modReq, orig)
|
|
||||||
} else {
|
|
||||||
t.modReq[orig] = mod
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// cloneRequest returns a clone of the provided *http.Request.
|
|
||||||
// The clone is a shallow copy of the struct and its Header map.
|
|
||||||
func cloneRequest(r *http.Request) *http.Request {
|
|
||||||
// shallow copy of the struct
|
|
||||||
r2 := new(http.Request)
|
|
||||||
*r2 = *r
|
|
||||||
// deep copy of the Header
|
|
||||||
r2.Header = make(http.Header, len(r.Header))
|
|
||||||
for k, s := range r.Header {
|
|
||||||
r2.Header[k] = append([]string(nil), s...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return r2
|
|
||||||
}
|
|
||||||
|
|
||||||
type onEOFReader struct {
|
|
||||||
rc io.ReadCloser
|
|
||||||
fn func()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *onEOFReader) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = r.rc.Read(p)
|
|
||||||
if err == io.EOF {
|
|
||||||
r.runFunc()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *onEOFReader) Close() error {
|
|
||||||
err := r.rc.Close()
|
|
||||||
r.runFunc()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *onEOFReader) runFunc() {
|
|
||||||
if fn := r.fn; fn != nil {
|
|
||||||
fn()
|
|
||||||
r.fn = nil
|
|
||||||
}
|
|
||||||
}
|
|
35
vendor/github.com/docker/distribution/registry/storage/cache/cache.go
generated
vendored
35
vendor/github.com/docker/distribution/registry/storage/cache/cache.go
generated
vendored
|
@ -1,35 +0,0 @@
|
||||||
// Package cache provides facilities to speed up access to the storage
|
|
||||||
// backend.
|
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/docker/distribution"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BlobDescriptorCacheProvider provides repository scoped
|
|
||||||
// BlobDescriptorService cache instances and a global descriptor cache.
|
|
||||||
type BlobDescriptorCacheProvider interface {
|
|
||||||
distribution.BlobDescriptorService
|
|
||||||
|
|
||||||
RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateDescriptor provides a helper function to ensure that caches have
|
|
||||||
// common criteria for admitting descriptors.
|
|
||||||
func ValidateDescriptor(desc distribution.Descriptor) error {
|
|
||||||
if err := desc.Digest.Validate(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if desc.Size < 0 {
|
|
||||||
return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
|
|
||||||
}
|
|
||||||
|
|
||||||
if desc.MediaType == "" {
|
|
||||||
return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
101
vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
generated
vendored
101
vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
generated
vendored
|
@ -1,101 +0,0 @@
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/docker/distribution/context"
|
|
||||||
"github.com/docker/distribution/digest"
|
|
||||||
|
|
||||||
"github.com/docker/distribution"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Metrics is used to hold metric counters
|
|
||||||
// related to the number of times a cache was
|
|
||||||
// hit or missed.
|
|
||||||
type Metrics struct {
|
|
||||||
Requests uint64
|
|
||||||
Hits uint64
|
|
||||||
Misses uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// MetricsTracker represents a metric tracker
|
|
||||||
// which simply counts the number of hits and misses.
|
|
||||||
type MetricsTracker interface {
|
|
||||||
Hit()
|
|
||||||
Miss()
|
|
||||||
Metrics() Metrics
|
|
||||||
}
|
|
||||||
|
|
||||||
type cachedBlobStatter struct {
|
|
||||||
cache distribution.BlobDescriptorService
|
|
||||||
backend distribution.BlobDescriptorService
|
|
||||||
tracker MetricsTracker
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCachedBlobStatter creates a new statter which prefers a cache and
|
|
||||||
// falls back to a backend.
|
|
||||||
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
|
|
||||||
return &cachedBlobStatter{
|
|
||||||
cache: cache,
|
|
||||||
backend: backend,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
|
|
||||||
// falls back to a backend. Hits and misses will send to the tracker.
|
|
||||||
func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
|
|
||||||
return &cachedBlobStatter{
|
|
||||||
cache: cache,
|
|
||||||
backend: backend,
|
|
||||||
tracker: tracker,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
||||||
desc, err := cbds.cache.Stat(ctx, dgst)
|
|
||||||
if err != nil {
|
|
||||||
if err != distribution.ErrBlobUnknown {
|
|
||||||
context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
goto fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
if cbds.tracker != nil {
|
|
||||||
cbds.tracker.Hit()
|
|
||||||
}
|
|
||||||
return desc, nil
|
|
||||||
fallback:
|
|
||||||
if cbds.tracker != nil {
|
|
||||||
cbds.tracker.Miss()
|
|
||||||
}
|
|
||||||
desc, err = cbds.backend.Stat(ctx, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return desc, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
|
|
||||||
context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return desc, err
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
err := cbds.cache.Clear(ctx, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = cbds.backend.Clear(ctx, dgst)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
|
|
||||||
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
|
|
||||||
context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
179
vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
generated
vendored
179
vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
generated
vendored
|
@ -1,179 +0,0 @@
|
||||||
package memory
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/docker/distribution"
|
|
||||||
"github.com/docker/distribution/context"
|
|
||||||
"github.com/docker/distribution/digest"
|
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
"github.com/docker/distribution/registry/storage/cache"
|
|
||||||
)
|
|
||||||
|
|
||||||
type inMemoryBlobDescriptorCacheProvider struct {
|
|
||||||
global *mapBlobDescriptorCache
|
|
||||||
repositories map[string]*mapBlobDescriptorCache
|
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for
|
|
||||||
// storing blob descriptor data.
|
|
||||||
func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
|
|
||||||
return &inMemoryBlobDescriptorCacheProvider{
|
|
||||||
global: newMapBlobDescriptorCache(),
|
|
||||||
repositories: make(map[string]*mapBlobDescriptorCache),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
|
|
||||||
if _, err := reference.ParseNamed(repo); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
imbdcp.mu.RLock()
|
|
||||||
defer imbdcp.mu.RUnlock()
|
|
||||||
|
|
||||||
return &repositoryScopedInMemoryBlobDescriptorCache{
|
|
||||||
repo: repo,
|
|
||||||
parent: imbdcp,
|
|
||||||
repository: imbdcp.repositories[repo],
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
||||||
return imbdcp.global.Stat(ctx, dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
return imbdcp.global.Clear(ctx, dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
|
|
||||||
_, err := imbdcp.Stat(ctx, dgst)
|
|
||||||
if err == distribution.ErrBlobUnknown {
|
|
||||||
|
|
||||||
if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
|
|
||||||
// if the digests differ, set the other canonical mapping
|
|
||||||
if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// unknown, just set it
|
|
||||||
return imbdcp.global.SetDescriptor(ctx, dgst, desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// we already know it, do nothing
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
|
|
||||||
// repository cache. Instances are not thread-safe but the delegated
|
|
||||||
// operations are.
|
|
||||||
type repositoryScopedInMemoryBlobDescriptorCache struct {
|
|
||||||
repo string
|
|
||||||
parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
|
|
||||||
repository *mapBlobDescriptorCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
||||||
rsimbdcp.parent.mu.Lock()
|
|
||||||
repo := rsimbdcp.repository
|
|
||||||
rsimbdcp.parent.mu.Unlock()
|
|
||||||
|
|
||||||
if repo == nil {
|
|
||||||
return distribution.Descriptor{}, distribution.ErrBlobUnknown
|
|
||||||
}
|
|
||||||
|
|
||||||
return repo.Stat(ctx, dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
rsimbdcp.parent.mu.Lock()
|
|
||||||
repo := rsimbdcp.repository
|
|
||||||
rsimbdcp.parent.mu.Unlock()
|
|
||||||
|
|
||||||
if repo == nil {
|
|
||||||
return distribution.ErrBlobUnknown
|
|
||||||
}
|
|
||||||
|
|
||||||
return repo.Clear(ctx, dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
|
|
||||||
rsimbdcp.parent.mu.Lock()
|
|
||||||
repo := rsimbdcp.repository
|
|
||||||
if repo == nil {
|
|
||||||
// allocate map since we are setting it now.
|
|
||||||
var ok bool
|
|
||||||
// have to read back value since we may have allocated elsewhere.
|
|
||||||
repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
|
|
||||||
if !ok {
|
|
||||||
repo = newMapBlobDescriptorCache()
|
|
||||||
rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
|
|
||||||
}
|
|
||||||
rsimbdcp.repository = repo
|
|
||||||
}
|
|
||||||
rsimbdcp.parent.mu.Unlock()
|
|
||||||
|
|
||||||
if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapBlobDescriptorCache provides a simple map-based implementation of the
|
|
||||||
// descriptor cache.
|
|
||||||
type mapBlobDescriptorCache struct {
|
|
||||||
descriptors map[digest.Digest]distribution.Descriptor
|
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
|
|
||||||
|
|
||||||
func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
|
|
||||||
return &mapBlobDescriptorCache{
|
|
||||||
descriptors: make(map[digest.Digest]distribution.Descriptor),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
|
||||||
if err := dgst.Validate(); err != nil {
|
|
||||||
return distribution.Descriptor{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
mbdc.mu.RLock()
|
|
||||||
defer mbdc.mu.RUnlock()
|
|
||||||
|
|
||||||
desc, ok := mbdc.descriptors[dgst]
|
|
||||||
if !ok {
|
|
||||||
return distribution.Descriptor{}, distribution.ErrBlobUnknown
|
|
||||||
}
|
|
||||||
|
|
||||||
return desc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
mbdc.mu.Lock()
|
|
||||||
defer mbdc.mu.Unlock()
|
|
||||||
|
|
||||||
delete(mbdc.descriptors, dgst)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
|
|
||||||
if err := dgst.Validate(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cache.ValidateDescriptor(desc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
mbdc.mu.Lock()
|
|
||||||
defer mbdc.mu.Unlock()
|
|
||||||
|
|
||||||
mbdc.descriptors[dgst] = desc
|
|
||||||
return nil
|
|
||||||
}
|
|
126
vendor/github.com/docker/distribution/uuid/uuid.go
generated
vendored
126
vendor/github.com/docker/distribution/uuid/uuid.go
generated
vendored
|
@ -1,126 +0,0 @@
|
||||||
// Package uuid provides simple UUID generation. Only version 4 style UUIDs
|
|
||||||
// can be generated.
|
|
||||||
//
|
|
||||||
// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
|
|
||||||
package uuid
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Bits is the number of bits in a UUID
|
|
||||||
Bits = 128
|
|
||||||
|
|
||||||
// Size is the number of bytes in a UUID
|
|
||||||
Size = Bits / 8
|
|
||||||
|
|
||||||
format = "%08x-%04x-%04x-%04x-%012x"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
|
|
||||||
ErrUUIDInvalid = fmt.Errorf("invalid uuid")
|
|
||||||
|
|
||||||
// Loggerf can be used to override the default logging destination. Such
|
|
||||||
// log messages in this library should be logged at warning or higher.
|
|
||||||
Loggerf = func(format string, args ...interface{}) {}
|
|
||||||
)
|
|
||||||
|
|
||||||
// UUID represents a UUID value. UUIDs can be compared and set to other values
|
|
||||||
// and accessed by byte.
|
|
||||||
type UUID [Size]byte
|
|
||||||
|
|
||||||
// Generate creates a new, version 4 uuid.
|
|
||||||
func Generate() (u UUID) {
|
|
||||||
const (
|
|
||||||
// ensures we backoff for less than 450ms total. Use the following to
|
|
||||||
// select new value, in units of 10ms:
|
|
||||||
// n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
|
|
||||||
maxretries = 9
|
|
||||||
backoff = time.Millisecond * 10
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
totalBackoff time.Duration
|
|
||||||
count int
|
|
||||||
retries int
|
|
||||||
)
|
|
||||||
|
|
||||||
for {
|
|
||||||
// This should never block but the read may fail. Because of this,
|
|
||||||
// we just try to read the random number generator until we get
|
|
||||||
// something. This is a very rare condition but may happen.
|
|
||||||
b := time.Duration(retries) * backoff
|
|
||||||
time.Sleep(b)
|
|
||||||
totalBackoff += b
|
|
||||||
|
|
||||||
n, err := io.ReadFull(rand.Reader, u[count:])
|
|
||||||
if err != nil {
|
|
||||||
if retryOnError(err) && retries < maxretries {
|
|
||||||
count += n
|
|
||||||
retries++
|
|
||||||
Loggerf("error generating version 4 uuid, retrying: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Any other errors represent a system problem. What did someone
|
|
||||||
// do to /dev/urandom?
|
|
||||||
panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
u[6] = (u[6] & 0x0f) | 0x40 // set version byte
|
|
||||||
u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
|
|
||||||
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse attempts to extract a uuid from the string or returns an error.
|
|
||||||
func Parse(s string) (u UUID, err error) {
|
|
||||||
if len(s) != 36 {
|
|
||||||
return UUID{}, ErrUUIDInvalid
|
|
||||||
}
|
|
||||||
|
|
||||||
// create stack addresses for each section of the uuid.
|
|
||||||
p := make([][]byte, 5)
|
|
||||||
|
|
||||||
if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
|
|
||||||
return u, err
|
|
||||||
}
|
|
||||||
|
|
||||||
copy(u[0:4], p[0])
|
|
||||||
copy(u[4:6], p[1])
|
|
||||||
copy(u[6:8], p[2])
|
|
||||||
copy(u[8:10], p[3])
|
|
||||||
copy(u[10:16], p[4])
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u UUID) String() string {
|
|
||||||
return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryOnError tries to detect whether or not retrying would be fruitful.
|
|
||||||
func retryOnError(err error) bool {
|
|
||||||
switch err := err.(type) {
|
|
||||||
case *os.PathError:
|
|
||||||
return retryOnError(err.Err) // unpack the target error
|
|
||||||
case syscall.Errno:
|
|
||||||
if err == syscall.EPERM {
|
|
||||||
// EPERM represents an entropy pool exhaustion, a condition under
|
|
||||||
// which we backoff and retry.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
22
vendor/github.com/docker/docker/api/types/auth.go
generated
vendored
22
vendor/github.com/docker/docker/api/types/auth.go
generated
vendored
|
@ -1,22 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// AuthConfig contains authorization information for connecting to a Registry
|
|
||||||
type AuthConfig struct {
|
|
||||||
Username string `json:"username,omitempty"`
|
|
||||||
Password string `json:"password,omitempty"`
|
|
||||||
Auth string `json:"auth,omitempty"`
|
|
||||||
|
|
||||||
// Email is an optional value associated with the username.
|
|
||||||
// This field is deprecated and will be removed in a later
|
|
||||||
// version of docker.
|
|
||||||
Email string `json:"email,omitempty"`
|
|
||||||
|
|
||||||
ServerAddress string `json:"serveraddress,omitempty"`
|
|
||||||
|
|
||||||
// IdentityToken is used to authenticate the user and get
|
|
||||||
// an access token for the registry.
|
|
||||||
IdentityToken string `json:"identitytoken,omitempty"`
|
|
||||||
|
|
||||||
// RegistryToken is a bearer token to be sent to a registry
|
|
||||||
RegistryToken string `json:"registrytoken,omitempty"`
|
|
||||||
}
|
|
84
vendor/github.com/docker/docker/api/types/backend/backend.go
generated
vendored
84
vendor/github.com/docker/docker/api/types/backend/backend.go
generated
vendored
|
@ -1,84 +0,0 @@
|
||||||
// Package backend includes types to send information to server backends.
|
|
||||||
package backend
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/pkg/streamformatter"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
|
|
||||||
type ContainerAttachConfig struct {
|
|
||||||
GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error)
|
|
||||||
UseStdin bool
|
|
||||||
UseStdout bool
|
|
||||||
UseStderr bool
|
|
||||||
Logs bool
|
|
||||||
Stream bool
|
|
||||||
DetachKeys string
|
|
||||||
|
|
||||||
// Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/sderr messages accordingly.
|
|
||||||
// TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change...
|
|
||||||
// HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream.
|
|
||||||
// Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately.
|
|
||||||
MuxStreams bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerLogsConfig holds configs for logging operations. Exists
|
|
||||||
// for users of the backend to to pass it a logging configuration.
|
|
||||||
type ContainerLogsConfig struct {
|
|
||||||
types.ContainerLogsOptions
|
|
||||||
OutStream io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerStatsConfig holds information for configuring the runtime
|
|
||||||
// behavior of a backend.ContainerStats() call.
|
|
||||||
type ContainerStatsConfig struct {
|
|
||||||
Stream bool
|
|
||||||
OutStream io.Writer
|
|
||||||
Version string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecInspect holds information about a running process started
|
|
||||||
// with docker exec.
|
|
||||||
type ExecInspect struct {
|
|
||||||
ID string
|
|
||||||
Running bool
|
|
||||||
ExitCode *int
|
|
||||||
ProcessConfig *ExecProcessConfig
|
|
||||||
OpenStdin bool
|
|
||||||
OpenStderr bool
|
|
||||||
OpenStdout bool
|
|
||||||
CanRemove bool
|
|
||||||
ContainerID string
|
|
||||||
DetachKeys []byte
|
|
||||||
Pid int
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecProcessConfig holds information about the exec process
|
|
||||||
// running on the host.
|
|
||||||
type ExecProcessConfig struct {
|
|
||||||
Tty bool `json:"tty"`
|
|
||||||
Entrypoint string `json:"entrypoint"`
|
|
||||||
Arguments []string `json:"arguments"`
|
|
||||||
Privileged *bool `json:"privileged,omitempty"`
|
|
||||||
User string `json:"user,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerCommitConfig is a wrapper around
|
|
||||||
// types.ContainerCommitConfig that also
|
|
||||||
// transports configuration changes for a container.
|
|
||||||
type ContainerCommitConfig struct {
|
|
||||||
types.ContainerCommitConfig
|
|
||||||
Changes []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProgressWriter is an interface
|
|
||||||
// to transport progress streams.
|
|
||||||
type ProgressWriter struct {
|
|
||||||
Output io.Writer
|
|
||||||
StdoutFormatter *streamformatter.StdoutFormatter
|
|
||||||
StderrFormatter *streamformatter.StderrFormatter
|
|
||||||
ProgressReaderFunc func(io.ReadCloser) io.ReadCloser
|
|
||||||
}
|
|
23
vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
generated
vendored
23
vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
generated
vendored
|
@ -1,23 +0,0 @@
|
||||||
package blkiodev
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// WeightDevice is a structure that holds device:weight pair
|
|
||||||
type WeightDevice struct {
|
|
||||||
Path string
|
|
||||||
Weight uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *WeightDevice) String() string {
|
|
||||||
return fmt.Sprintf("%s:%d", w.Path, w.Weight)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ThrottleDevice is a structure that holds device:rate_per_second pair
|
|
||||||
type ThrottleDevice struct {
|
|
||||||
Path string
|
|
||||||
Rate uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *ThrottleDevice) String() string {
|
|
||||||
return fmt.Sprintf("%s:%d", t.Path, t.Rate)
|
|
||||||
}
|
|
378
vendor/github.com/docker/docker/api/types/client.go
generated
vendored
378
vendor/github.com/docker/docker/api/types/client.go
generated
vendored
|
@ -1,378 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
|
||||||
"github.com/docker/go-units"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
|
|
||||||
type CheckpointCreateOptions struct {
|
|
||||||
CheckpointID string
|
|
||||||
CheckpointDir string
|
|
||||||
Exit bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckpointListOptions holds parameters to list checkpoints for a container
|
|
||||||
type CheckpointListOptions struct {
|
|
||||||
CheckpointDir string
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
|
|
||||||
type CheckpointDeleteOptions struct {
|
|
||||||
CheckpointID string
|
|
||||||
CheckpointDir string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerAttachOptions holds parameters to attach to a container.
|
|
||||||
type ContainerAttachOptions struct {
|
|
||||||
Stream bool
|
|
||||||
Stdin bool
|
|
||||||
Stdout bool
|
|
||||||
Stderr bool
|
|
||||||
DetachKeys string
|
|
||||||
Logs bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerCommitOptions holds parameters to commit changes into a container.
|
|
||||||
type ContainerCommitOptions struct {
|
|
||||||
Reference string
|
|
||||||
Comment string
|
|
||||||
Author string
|
|
||||||
Changes []string
|
|
||||||
Pause bool
|
|
||||||
Config *container.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerExecInspect holds information returned by exec inspect.
|
|
||||||
type ContainerExecInspect struct {
|
|
||||||
ExecID string
|
|
||||||
ContainerID string
|
|
||||||
Running bool
|
|
||||||
ExitCode int
|
|
||||||
Pid int
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerListOptions holds parameters to list containers with.
|
|
||||||
type ContainerListOptions struct {
|
|
||||||
Quiet bool
|
|
||||||
Size bool
|
|
||||||
All bool
|
|
||||||
Latest bool
|
|
||||||
Since string
|
|
||||||
Before string
|
|
||||||
Limit int
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerLogsOptions holds parameters to filter logs with.
|
|
||||||
type ContainerLogsOptions struct {
|
|
||||||
ShowStdout bool
|
|
||||||
ShowStderr bool
|
|
||||||
Since string
|
|
||||||
Timestamps bool
|
|
||||||
Follow bool
|
|
||||||
Tail string
|
|
||||||
Details bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerRemoveOptions holds parameters to remove containers.
|
|
||||||
type ContainerRemoveOptions struct {
|
|
||||||
RemoveVolumes bool
|
|
||||||
RemoveLinks bool
|
|
||||||
Force bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerStartOptions holds parameters to start containers.
|
|
||||||
type ContainerStartOptions struct {
|
|
||||||
CheckpointID string
|
|
||||||
CheckpointDir string
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyToContainerOptions holds information
|
|
||||||
// about files to copy into a container
|
|
||||||
type CopyToContainerOptions struct {
|
|
||||||
AllowOverwriteDirWithFile bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// EventsOptions holds parameters to filter events with.
|
|
||||||
type EventsOptions struct {
|
|
||||||
Since string
|
|
||||||
Until string
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
|
||||||
type NetworkListOptions struct {
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// HijackedResponse holds connection information for a hijacked request.
|
|
||||||
type HijackedResponse struct {
|
|
||||||
Conn net.Conn
|
|
||||||
Reader *bufio.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the hijacked connection and reader.
|
|
||||||
func (h *HijackedResponse) Close() {
|
|
||||||
h.Conn.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseWriter is an interface that implements structs
|
|
||||||
// that close input streams to prevent from writing.
|
|
||||||
type CloseWriter interface {
|
|
||||||
CloseWrite() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseWrite closes a readWriter for writing.
|
|
||||||
func (h *HijackedResponse) CloseWrite() error {
|
|
||||||
if conn, ok := h.Conn.(CloseWriter); ok {
|
|
||||||
return conn.CloseWrite()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageBuildOptions holds the information
|
|
||||||
// necessary to build images.
|
|
||||||
type ImageBuildOptions struct {
|
|
||||||
Tags []string
|
|
||||||
SuppressOutput bool
|
|
||||||
RemoteContext string
|
|
||||||
NoCache bool
|
|
||||||
Remove bool
|
|
||||||
ForceRemove bool
|
|
||||||
PullParent bool
|
|
||||||
Isolation container.Isolation
|
|
||||||
CPUSetCPUs string
|
|
||||||
CPUSetMems string
|
|
||||||
CPUShares int64
|
|
||||||
CPUQuota int64
|
|
||||||
CPUPeriod int64
|
|
||||||
Memory int64
|
|
||||||
MemorySwap int64
|
|
||||||
CgroupParent string
|
|
||||||
NetworkMode string
|
|
||||||
ShmSize int64
|
|
||||||
Dockerfile string
|
|
||||||
Ulimits []*units.Ulimit
|
|
||||||
// See the parsing of buildArgs in api/server/router/build/build_routes.go
|
|
||||||
// for an explaination of why BuildArgs needs to use *string instead of
|
|
||||||
// just a string
|
|
||||||
BuildArgs map[string]*string
|
|
||||||
AuthConfigs map[string]AuthConfig
|
|
||||||
Context io.Reader
|
|
||||||
Labels map[string]string
|
|
||||||
// squash the resulting image's layers to the parent
|
|
||||||
// preserves the original image and creates a new one from the parent with all
|
|
||||||
// the changes applied to a single layer
|
|
||||||
Squash bool
|
|
||||||
// CacheFrom specifies images that are used for matching cache. Images
|
|
||||||
// specified here do not need to have a valid parent chain to match cache.
|
|
||||||
CacheFrom []string
|
|
||||||
SecurityOpt []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageBuildResponse holds information
|
|
||||||
// returned by a server after building
|
|
||||||
// an image.
|
|
||||||
type ImageBuildResponse struct {
|
|
||||||
Body io.ReadCloser
|
|
||||||
OSType string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageCreateOptions holds information to create images.
|
|
||||||
type ImageCreateOptions struct {
|
|
||||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageImportSource holds source information for ImageImport
|
|
||||||
type ImageImportSource struct {
|
|
||||||
Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
|
|
||||||
SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageImportOptions holds information to import images from the client host.
|
|
||||||
type ImageImportOptions struct {
|
|
||||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
|
||||||
Message string // Message is the message to tag the image with
|
|
||||||
Changes []string // Changes are the raw changes to apply to this image
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageListOptions holds parameters to filter the list of images with.
|
|
||||||
type ImageListOptions struct {
|
|
||||||
All bool
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageLoadResponse returns information to the client about a load process.
|
|
||||||
type ImageLoadResponse struct {
|
|
||||||
// Body must be closed to avoid a resource leak
|
|
||||||
Body io.ReadCloser
|
|
||||||
JSON bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImagePullOptions holds information to pull images.
|
|
||||||
type ImagePullOptions struct {
|
|
||||||
All bool
|
|
||||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
|
||||||
PrivilegeFunc RequestPrivilegeFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestPrivilegeFunc is a function interface that
|
|
||||||
// clients can supply to retry operations after
|
|
||||||
// getting an authorization error.
|
|
||||||
// This function returns the registry authentication
|
|
||||||
// header value in base 64 format, or an error
|
|
||||||
// if the privilege request fails.
|
|
||||||
type RequestPrivilegeFunc func() (string, error)
|
|
||||||
|
|
||||||
//ImagePushOptions holds information to push images.
|
|
||||||
type ImagePushOptions ImagePullOptions
|
|
||||||
|
|
||||||
// ImageRemoveOptions holds parameters to remove images.
|
|
||||||
type ImageRemoveOptions struct {
|
|
||||||
Force bool
|
|
||||||
PruneChildren bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageSearchOptions holds parameters to search images with.
|
|
||||||
type ImageSearchOptions struct {
|
|
||||||
RegistryAuth string
|
|
||||||
PrivilegeFunc RequestPrivilegeFunc
|
|
||||||
Filters filters.Args
|
|
||||||
Limit int
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResizeOptions holds parameters to resize a tty.
|
|
||||||
// It can be used to resize container ttys and
|
|
||||||
// exec process ttys too.
|
|
||||||
type ResizeOptions struct {
|
|
||||||
Height uint
|
|
||||||
Width uint
|
|
||||||
}
|
|
||||||
|
|
||||||
// VersionResponse holds version information for the client and the server
|
|
||||||
type VersionResponse struct {
|
|
||||||
Client *Version
|
|
||||||
Server *Version
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerOK returns true when the client could connect to the docker server
|
|
||||||
// and parse the information received. It returns false otherwise.
|
|
||||||
func (v VersionResponse) ServerOK() bool {
|
|
||||||
return v.Server != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeListOptions holds parameters to list nodes with.
|
|
||||||
type NodeListOptions struct {
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeRemoveOptions holds parameters to remove nodes with.
|
|
||||||
type NodeRemoveOptions struct {
|
|
||||||
Force bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServiceCreateOptions contains the options to use when creating a service.
|
|
||||||
type ServiceCreateOptions struct {
|
|
||||||
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
|
||||||
// use when updating the service.
|
|
||||||
//
|
|
||||||
// This field follows the format of the X-Registry-Auth header.
|
|
||||||
EncodedRegistryAuth string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServiceCreateResponse contains the information returned to a client
|
|
||||||
// on the creation of a new service.
|
|
||||||
type ServiceCreateResponse struct {
|
|
||||||
// ID is the ID of the created service.
|
|
||||||
ID string
|
|
||||||
// Warnings is a set of non-fatal warning messages to pass on to the user.
|
|
||||||
Warnings []string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values for RegistryAuthFrom in ServiceUpdateOptions
|
|
||||||
const (
|
|
||||||
RegistryAuthFromSpec = "spec"
|
|
||||||
RegistryAuthFromPreviousSpec = "previous-spec"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServiceUpdateOptions contains the options to be used for updating services.
|
|
||||||
type ServiceUpdateOptions struct {
|
|
||||||
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
|
||||||
// use when updating the service.
|
|
||||||
//
|
|
||||||
// This field follows the format of the X-Registry-Auth header.
|
|
||||||
EncodedRegistryAuth string
|
|
||||||
|
|
||||||
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
|
|
||||||
// into this field. While it does open API users up to racy writes, most
|
|
||||||
// users may not need that level of consistency in practice.
|
|
||||||
|
|
||||||
// RegistryAuthFrom specifies where to find the registry authorization
|
|
||||||
// credentials if they are not given in EncodedRegistryAuth. Valid
|
|
||||||
// values are "spec" and "previous-spec".
|
|
||||||
RegistryAuthFrom string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServiceListOptions holds parameters to list services with.
|
|
||||||
type ServiceListOptions struct {
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// TaskListOptions holds parameters to list tasks with.
|
|
||||||
type TaskListOptions struct {
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginRemoveOptions holds parameters to remove plugins.
|
|
||||||
type PluginRemoveOptions struct {
|
|
||||||
Force bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginEnableOptions holds parameters to enable plugins.
|
|
||||||
type PluginEnableOptions struct {
|
|
||||||
Timeout int
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginDisableOptions holds parameters to disable plugins.
|
|
||||||
type PluginDisableOptions struct {
|
|
||||||
Force bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginInstallOptions holds parameters to install a plugin.
|
|
||||||
type PluginInstallOptions struct {
|
|
||||||
Disabled bool
|
|
||||||
AcceptAllPermissions bool
|
|
||||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
|
||||||
RemoteRef string // RemoteRef is the plugin name on the registry
|
|
||||||
PrivilegeFunc RequestPrivilegeFunc
|
|
||||||
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
|
|
||||||
Args []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecretRequestOption is a type for requesting secrets
|
|
||||||
type SecretRequestOption struct {
|
|
||||||
Source string
|
|
||||||
Target string
|
|
||||||
UID string
|
|
||||||
GID string
|
|
||||||
Mode os.FileMode
|
|
||||||
}
|
|
||||||
|
|
||||||
// SwarmUnlockKeyResponse contains the response for Engine API:
|
|
||||||
// GET /swarm/unlockkey
|
|
||||||
type SwarmUnlockKeyResponse struct {
|
|
||||||
// UnlockKey is the unlock key in ASCII-armored format.
|
|
||||||
UnlockKey string
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginCreateOptions hold all options to plugin create.
|
|
||||||
type PluginCreateOptions struct {
|
|
||||||
RepoName string
|
|
||||||
}
|
|
69
vendor/github.com/docker/docker/api/types/configs.go
generated
vendored
69
vendor/github.com/docker/docker/api/types/configs.go
generated
vendored
|
@ -1,69 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/api/types/network"
|
|
||||||
)
|
|
||||||
|
|
||||||
// configs holds structs used for internal communication between the
|
|
||||||
// frontend (such as an http server) and the backend (such as the
|
|
||||||
// docker daemon).
|
|
||||||
|
|
||||||
// ContainerCreateConfig is the parameter set to ContainerCreate()
|
|
||||||
type ContainerCreateConfig struct {
|
|
||||||
Name string
|
|
||||||
Config *container.Config
|
|
||||||
HostConfig *container.HostConfig
|
|
||||||
NetworkingConfig *network.NetworkingConfig
|
|
||||||
AdjustCPUShares bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerRmConfig holds arguments for the container remove
|
|
||||||
// operation. This struct is used to tell the backend what operations
|
|
||||||
// to perform.
|
|
||||||
type ContainerRmConfig struct {
|
|
||||||
ForceRemove, RemoveVolume, RemoveLink bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerCommitConfig contains build configs for commit operation,
|
|
||||||
// and is used when making a commit with the current state of the container.
|
|
||||||
type ContainerCommitConfig struct {
|
|
||||||
Pause bool
|
|
||||||
Repo string
|
|
||||||
Tag string
|
|
||||||
Author string
|
|
||||||
Comment string
|
|
||||||
// merge container config into commit config before commit
|
|
||||||
MergeConfigs bool
|
|
||||||
Config *container.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
|
||||||
// for the exec feature of docker.
|
|
||||||
type ExecConfig struct {
|
|
||||||
User string // User that will run the command
|
|
||||||
Privileged bool // Is the container in privileged mode
|
|
||||||
Tty bool // Attach standard streams to a tty.
|
|
||||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
|
||||||
AttachStderr bool // Attach the standard error
|
|
||||||
AttachStdout bool // Attach the standard output
|
|
||||||
Detach bool // Execute in detach mode
|
|
||||||
DetachKeys string // Escape keys for detach
|
|
||||||
Env []string // Environment variables
|
|
||||||
Cmd []string // Execution commands and args
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginRmConfig holds arguments for plugin remove.
|
|
||||||
type PluginRmConfig struct {
|
|
||||||
ForceRemove bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginEnableConfig holds arguments for plugin enable
|
|
||||||
type PluginEnableConfig struct {
|
|
||||||
Timeout int
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginDisableConfig holds arguments for plugin disable.
|
|
||||||
type PluginDisableConfig struct {
|
|
||||||
ForceDisable bool
|
|
||||||
}
|
|
62
vendor/github.com/docker/docker/api/types/container/config.go
generated
vendored
62
vendor/github.com/docker/docker/api/types/container/config.go
generated
vendored
|
@ -1,62 +0,0 @@
|
||||||
package container
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/strslice"
|
|
||||||
"github.com/docker/go-connections/nat"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
|
||||||
type HealthConfig struct {
|
|
||||||
// Test is the test to perform to check that the container is healthy.
|
|
||||||
// An empty slice means to inherit the default.
|
|
||||||
// The options are:
|
|
||||||
// {} : inherit healthcheck
|
|
||||||
// {"NONE"} : disable healthcheck
|
|
||||||
// {"CMD", args...} : exec arguments directly
|
|
||||||
// {"CMD-SHELL", command} : run command with system's default shell
|
|
||||||
Test []string `json:",omitempty"`
|
|
||||||
|
|
||||||
// Zero means to inherit. Durations are expressed as integer nanoseconds.
|
|
||||||
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
|
||||||
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
|
||||||
|
|
||||||
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
|
|
||||||
// Zero means inherit.
|
|
||||||
Retries int `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config contains the configuration data about a container.
|
|
||||||
// It should hold only portable information about the container.
|
|
||||||
// Here, "portable" means "independent from the host we are running on".
|
|
||||||
// Non-portable information *should* appear in HostConfig.
|
|
||||||
// All fields added to this struct must be marked `omitempty` to keep getting
|
|
||||||
// predictable hashes from the old `v1Compatibility` configuration.
|
|
||||||
type Config struct {
|
|
||||||
Hostname string // Hostname
|
|
||||||
Domainname string // Domainname
|
|
||||||
User string // User that will run the command(s) inside the container, also support user:group
|
|
||||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
|
||||||
AttachStdout bool // Attach the standard output
|
|
||||||
AttachStderr bool // Attach the standard error
|
|
||||||
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
|
|
||||||
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
|
|
||||||
OpenStdin bool // Open stdin
|
|
||||||
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
|
|
||||||
Env []string // List of environment variable to set in the container
|
|
||||||
Cmd strslice.StrSlice // Command to run when starting the container
|
|
||||||
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
|
|
||||||
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
|
|
||||||
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
|
|
||||||
Volumes map[string]struct{} // List of volumes (mounts) used for the container
|
|
||||||
WorkingDir string // Current directory (PWD) in the command will be launched
|
|
||||||
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
|
|
||||||
NetworkDisabled bool `json:",omitempty"` // Is network disabled
|
|
||||||
MacAddress string `json:",omitempty"` // Mac Address of the container
|
|
||||||
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
|
|
||||||
Labels map[string]string // List of labels set to this container
|
|
||||||
StopSignal string `json:",omitempty"` // Signal to stop a container
|
|
||||||
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
|
|
||||||
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
|
|
||||||
}
|
|
21
vendor/github.com/docker/docker/api/types/container/container_create.go
generated
vendored
21
vendor/github.com/docker/docker/api/types/container/container_create.go
generated
vendored
|
@ -1,21 +0,0 @@
|
||||||
package container
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// DO NOT EDIT THIS FILE
|
|
||||||
// This file was generated by `swagger generate operation`
|
|
||||||
//
|
|
||||||
// See hack/swagger-gen.sh
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// ContainerCreateCreatedBody container create created body
|
|
||||||
// swagger:model ContainerCreateCreatedBody
|
|
||||||
type ContainerCreateCreatedBody struct {
|
|
||||||
|
|
||||||
// The ID of the created container
|
|
||||||
// Required: true
|
|
||||||
ID string `json:"Id"`
|
|
||||||
|
|
||||||
// Warnings encountered when creating the container
|
|
||||||
// Required: true
|
|
||||||
Warnings []string `json:"Warnings"`
|
|
||||||
}
|
|
17
vendor/github.com/docker/docker/api/types/container/container_update.go
generated
vendored
17
vendor/github.com/docker/docker/api/types/container/container_update.go
generated
vendored
|
@ -1,17 +0,0 @@
|
||||||
package container
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// DO NOT EDIT THIS FILE
|
|
||||||
// This file was generated by `swagger generate operation`
|
|
||||||
//
|
|
||||||
// See hack/swagger-gen.sh
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// ContainerUpdateOKBody container update o k body
|
|
||||||
// swagger:model ContainerUpdateOKBody
|
|
||||||
type ContainerUpdateOKBody struct {
|
|
||||||
|
|
||||||
// warnings
|
|
||||||
// Required: true
|
|
||||||
Warnings []string `json:"Warnings"`
|
|
||||||
}
|
|
17
vendor/github.com/docker/docker/api/types/container/container_wait.go
generated
vendored
17
vendor/github.com/docker/docker/api/types/container/container_wait.go
generated
vendored
|
@ -1,17 +0,0 @@
|
||||||
package container
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// DO NOT EDIT THIS FILE
|
|
||||||
// This file was generated by `swagger generate operation`
|
|
||||||
//
|
|
||||||
// See hack/swagger-gen.sh
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// ContainerWaitOKBody container wait o k body
|
|
||||||
// swagger:model ContainerWaitOKBody
|
|
||||||
type ContainerWaitOKBody struct {
|
|
||||||
|
|
||||||
// Exit code of the container
|
|
||||||
// Required: true
|
|
||||||
StatusCode int64 `json:"StatusCode"`
|
|
||||||
}
|
|
333
vendor/github.com/docker/docker/api/types/container/host_config.go
generated
vendored
333
vendor/github.com/docker/docker/api/types/container/host_config.go
generated
vendored
|
@ -1,333 +0,0 @@
|
||||||
package container
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/blkiodev"
|
|
||||||
"github.com/docker/docker/api/types/mount"
|
|
||||||
"github.com/docker/docker/api/types/strslice"
|
|
||||||
"github.com/docker/go-connections/nat"
|
|
||||||
"github.com/docker/go-units"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NetworkMode represents the container network stack.
|
|
||||||
type NetworkMode string
|
|
||||||
|
|
||||||
// Isolation represents the isolation technology of a container. The supported
|
|
||||||
// values are platform specific
|
|
||||||
type Isolation string
|
|
||||||
|
|
||||||
// IsDefault indicates the default isolation technology of a container. On Linux this
|
|
||||||
// is the native driver. On Windows, this is a Windows Server Container.
|
|
||||||
func (i Isolation) IsDefault() bool {
|
|
||||||
return strings.ToLower(string(i)) == "default" || string(i) == ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// IpcMode represents the container ipc stack.
|
|
||||||
type IpcMode string
|
|
||||||
|
|
||||||
// IsPrivate indicates whether the container uses its private ipc stack.
|
|
||||||
func (n IpcMode) IsPrivate() bool {
|
|
||||||
return !(n.IsHost() || n.IsContainer())
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsHost indicates whether the container uses the host's ipc stack.
|
|
||||||
func (n IpcMode) IsHost() bool {
|
|
||||||
return n == "host"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsContainer indicates whether the container uses a container's ipc stack.
|
|
||||||
func (n IpcMode) IsContainer() bool {
|
|
||||||
parts := strings.SplitN(string(n), ":", 2)
|
|
||||||
return len(parts) > 1 && parts[0] == "container"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid indicates whether the ipc stack is valid.
|
|
||||||
func (n IpcMode) Valid() bool {
|
|
||||||
parts := strings.Split(string(n), ":")
|
|
||||||
switch mode := parts[0]; mode {
|
|
||||||
case "", "host":
|
|
||||||
case "container":
|
|
||||||
if len(parts) != 2 || parts[1] == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Container returns the name of the container ipc stack is going to be used.
|
|
||||||
func (n IpcMode) Container() string {
|
|
||||||
parts := strings.SplitN(string(n), ":", 2)
|
|
||||||
if len(parts) > 1 {
|
|
||||||
return parts[1]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// UsernsMode represents userns mode in the container.
|
|
||||||
type UsernsMode string
|
|
||||||
|
|
||||||
// IsHost indicates whether the container uses the host's userns.
|
|
||||||
func (n UsernsMode) IsHost() bool {
|
|
||||||
return n == "host"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPrivate indicates whether the container uses the a private userns.
|
|
||||||
func (n UsernsMode) IsPrivate() bool {
|
|
||||||
return !(n.IsHost())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid indicates whether the userns is valid.
|
|
||||||
func (n UsernsMode) Valid() bool {
|
|
||||||
parts := strings.Split(string(n), ":")
|
|
||||||
switch mode := parts[0]; mode {
|
|
||||||
case "", "host":
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// CgroupSpec represents the cgroup to use for the container.
|
|
||||||
type CgroupSpec string
|
|
||||||
|
|
||||||
// IsContainer indicates whether the container is using another container cgroup
|
|
||||||
func (c CgroupSpec) IsContainer() bool {
|
|
||||||
parts := strings.SplitN(string(c), ":", 2)
|
|
||||||
return len(parts) > 1 && parts[0] == "container"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid indicates whether the cgroup spec is valid.
|
|
||||||
func (c CgroupSpec) Valid() bool {
|
|
||||||
return c.IsContainer() || c == ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Container returns the name of the container whose cgroup will be used.
|
|
||||||
func (c CgroupSpec) Container() string {
|
|
||||||
parts := strings.SplitN(string(c), ":", 2)
|
|
||||||
if len(parts) > 1 {
|
|
||||||
return parts[1]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// UTSMode represents the UTS namespace of the container.
|
|
||||||
type UTSMode string
|
|
||||||
|
|
||||||
// IsPrivate indicates whether the container uses its private UTS namespace.
|
|
||||||
func (n UTSMode) IsPrivate() bool {
|
|
||||||
return !(n.IsHost())
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsHost indicates whether the container uses the host's UTS namespace.
|
|
||||||
func (n UTSMode) IsHost() bool {
|
|
||||||
return n == "host"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid indicates whether the UTS namespace is valid.
|
|
||||||
func (n UTSMode) Valid() bool {
|
|
||||||
parts := strings.Split(string(n), ":")
|
|
||||||
switch mode := parts[0]; mode {
|
|
||||||
case "", "host":
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// PidMode represents the pid namespace of the container.
|
|
||||||
type PidMode string
|
|
||||||
|
|
||||||
// IsPrivate indicates whether the container uses its own new pid namespace.
|
|
||||||
func (n PidMode) IsPrivate() bool {
|
|
||||||
return !(n.IsHost() || n.IsContainer())
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsHost indicates whether the container uses the host's pid namespace.
|
|
||||||
func (n PidMode) IsHost() bool {
|
|
||||||
return n == "host"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsContainer indicates whether the container uses a container's pid namespace.
|
|
||||||
func (n PidMode) IsContainer() bool {
|
|
||||||
parts := strings.SplitN(string(n), ":", 2)
|
|
||||||
return len(parts) > 1 && parts[0] == "container"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid indicates whether the pid namespace is valid.
|
|
||||||
func (n PidMode) Valid() bool {
|
|
||||||
parts := strings.Split(string(n), ":")
|
|
||||||
switch mode := parts[0]; mode {
|
|
||||||
case "", "host":
|
|
||||||
case "container":
|
|
||||||
if len(parts) != 2 || parts[1] == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Container returns the name of the container whose pid namespace is going to be used.
|
|
||||||
func (n PidMode) Container() string {
|
|
||||||
parts := strings.SplitN(string(n), ":", 2)
|
|
||||||
if len(parts) > 1 {
|
|
||||||
return parts[1]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeviceMapping represents the device mapping between the host and the container.
|
|
||||||
type DeviceMapping struct {
|
|
||||||
PathOnHost string
|
|
||||||
PathInContainer string
|
|
||||||
CgroupPermissions string
|
|
||||||
}
|
|
||||||
|
|
||||||
// RestartPolicy represents the restart policies of the container.
|
|
||||||
type RestartPolicy struct {
|
|
||||||
Name string
|
|
||||||
MaximumRetryCount int
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNone indicates whether the container has the "no" restart policy.
|
|
||||||
// This means the container will not automatically restart when exiting.
|
|
||||||
func (rp *RestartPolicy) IsNone() bool {
|
|
||||||
return rp.Name == "no" || rp.Name == ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAlways indicates whether the container has the "always" restart policy.
|
|
||||||
// This means the container will automatically restart regardless of the exit status.
|
|
||||||
func (rp *RestartPolicy) IsAlways() bool {
|
|
||||||
return rp.Name == "always"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
|
|
||||||
// This means the container will automatically restart of exiting with a non-zero exit status.
|
|
||||||
func (rp *RestartPolicy) IsOnFailure() bool {
|
|
||||||
return rp.Name == "on-failure"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsUnlessStopped indicates whether the container has the
|
|
||||||
// "unless-stopped" restart policy. This means the container will
|
|
||||||
// automatically restart unless user has put it to stopped state.
|
|
||||||
func (rp *RestartPolicy) IsUnlessStopped() bool {
|
|
||||||
return rp.Name == "unless-stopped"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsSame compares two RestartPolicy to see if they are the same
|
|
||||||
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
|
|
||||||
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogConfig represents the logging configuration of the container.
|
|
||||||
type LogConfig struct {
|
|
||||||
Type string
|
|
||||||
Config map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resources contains container's resources (cgroups config, ulimits...)
|
|
||||||
type Resources struct {
|
|
||||||
// Applicable to all platforms
|
|
||||||
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
|
|
||||||
Memory int64 // Memory limit (in bytes)
|
|
||||||
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
|
|
||||||
|
|
||||||
// Applicable to UNIX platforms
|
|
||||||
CgroupParent string // Parent cgroup.
|
|
||||||
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
|
|
||||||
BlkioWeightDevice []*blkiodev.WeightDevice
|
|
||||||
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
|
|
||||||
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
|
|
||||||
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
|
|
||||||
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
|
|
||||||
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
|
|
||||||
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
|
|
||||||
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
|
|
||||||
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
|
|
||||||
CpusetCpus string // CpusetCpus 0-2, 0,1
|
|
||||||
CpusetMems string // CpusetMems 0-2, 0,1
|
|
||||||
Devices []DeviceMapping // List of devices to map inside the container
|
|
||||||
DiskQuota int64 // Disk limit (in bytes)
|
|
||||||
KernelMemory int64 // Kernel memory limit (in bytes)
|
|
||||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
|
||||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
|
||||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
|
||||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
|
||||||
PidsLimit int64 // Setting pids limit for a container
|
|
||||||
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
|
||||||
|
|
||||||
// Applicable to Windows
|
|
||||||
CPUCount int64 `json:"CpuCount"` // CPU count
|
|
||||||
CPUPercent int64 `json:"CpuPercent"` // CPU percent
|
|
||||||
IOMaximumIOps uint64 // Maximum IOps for the container system drive
|
|
||||||
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateConfig holds the mutable attributes of a Container.
|
|
||||||
// Those attributes can be updated at runtime.
|
|
||||||
type UpdateConfig struct {
|
|
||||||
// Contains container's resources (cgroups, ulimits)
|
|
||||||
Resources
|
|
||||||
RestartPolicy RestartPolicy
|
|
||||||
}
|
|
||||||
|
|
||||||
// HostConfig the non-portable Config structure of a container.
|
|
||||||
// Here, "non-portable" means "dependent of the host we are running on".
|
|
||||||
// Portable information *should* appear in Config.
|
|
||||||
type HostConfig struct {
|
|
||||||
// Applicable to all platforms
|
|
||||||
Binds []string // List of volume bindings for this container
|
|
||||||
ContainerIDFile string // File (path) where the containerId is written
|
|
||||||
LogConfig LogConfig // Configuration of the logs for this container
|
|
||||||
NetworkMode NetworkMode // Network mode to use for the container
|
|
||||||
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
|
|
||||||
RestartPolicy RestartPolicy // Restart policy to be used for the container
|
|
||||||
AutoRemove bool // Automatically remove container when it exits
|
|
||||||
VolumeDriver string // Name of the volume driver used to mount volumes
|
|
||||||
VolumesFrom []string // List of volumes to take from other container
|
|
||||||
|
|
||||||
// Applicable to UNIX platforms
|
|
||||||
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
|
|
||||||
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
|
|
||||||
DNS []string `json:"Dns"` // List of DNS server to lookup
|
|
||||||
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
|
|
||||||
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
|
|
||||||
ExtraHosts []string // List of extra hosts
|
|
||||||
GroupAdd []string // List of additional groups that the container process will run as
|
|
||||||
IpcMode IpcMode // IPC namespace to use for the container
|
|
||||||
Cgroup CgroupSpec // Cgroup to use for the container
|
|
||||||
Links []string // List of links (in the name:alias form)
|
|
||||||
OomScoreAdj int // Container preference for OOM-killing
|
|
||||||
PidMode PidMode // PID namespace to use for the container
|
|
||||||
Privileged bool // Is the container in privileged mode
|
|
||||||
PublishAllPorts bool // Should docker publish all exposed port for the container
|
|
||||||
ReadonlyRootfs bool // Is the container root filesystem in read-only
|
|
||||||
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
|
|
||||||
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
|
|
||||||
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
|
|
||||||
UTSMode UTSMode // UTS namespace to use for the container
|
|
||||||
UsernsMode UsernsMode // The user namespace to use for the container
|
|
||||||
ShmSize int64 // Total shm memory usage
|
|
||||||
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
|
|
||||||
Runtime string `json:",omitempty"` // Runtime to use with this container
|
|
||||||
|
|
||||||
// Applicable to Windows
|
|
||||||
ConsoleSize [2]uint // Initial console size (height,width)
|
|
||||||
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
|
|
||||||
|
|
||||||
// Contains container's resources (cgroups, ulimits)
|
|
||||||
Resources
|
|
||||||
|
|
||||||
// Mounts specs used by the container
|
|
||||||
Mounts []mount.Mount `json:",omitempty"`
|
|
||||||
|
|
||||||
// Run a custom init inside the container, if null, use the daemon's configured settings
|
|
||||||
Init *bool `json:",omitempty"`
|
|
||||||
|
|
||||||
// Custom init path
|
|
||||||
InitPath string `json:",omitempty"`
|
|
||||||
}
|
|
81
vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
generated
vendored
81
vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
generated
vendored
|
@ -1,81 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package container
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// IsValid indicates if an isolation technology is valid
|
|
||||||
func (i Isolation) IsValid() bool {
|
|
||||||
return i.IsDefault()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPrivate indicates whether container uses its private network stack.
|
|
||||||
func (n NetworkMode) IsPrivate() bool {
|
|
||||||
return !(n.IsHost() || n.IsContainer())
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsDefault indicates whether container uses the default network stack.
|
|
||||||
func (n NetworkMode) IsDefault() bool {
|
|
||||||
return n == "default"
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkName returns the name of the network stack.
|
|
||||||
func (n NetworkMode) NetworkName() string {
|
|
||||||
if n.IsBridge() {
|
|
||||||
return "bridge"
|
|
||||||
} else if n.IsHost() {
|
|
||||||
return "host"
|
|
||||||
} else if n.IsContainer() {
|
|
||||||
return "container"
|
|
||||||
} else if n.IsNone() {
|
|
||||||
return "none"
|
|
||||||
} else if n.IsDefault() {
|
|
||||||
return "default"
|
|
||||||
} else if n.IsUserDefined() {
|
|
||||||
return n.UserDefined()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsBridge indicates whether container uses the bridge network stack
|
|
||||||
func (n NetworkMode) IsBridge() bool {
|
|
||||||
return n == "bridge"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsHost indicates whether container uses the host network stack.
|
|
||||||
func (n NetworkMode) IsHost() bool {
|
|
||||||
return n == "host"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsContainer indicates whether container uses a container network stack.
|
|
||||||
func (n NetworkMode) IsContainer() bool {
|
|
||||||
parts := strings.SplitN(string(n), ":", 2)
|
|
||||||
return len(parts) > 1 && parts[0] == "container"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNone indicates whether container isn't using a network stack.
|
|
||||||
func (n NetworkMode) IsNone() bool {
|
|
||||||
return n == "none"
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnectedContainer is the id of the container which network this container is connected to.
|
|
||||||
func (n NetworkMode) ConnectedContainer() string {
|
|
||||||
parts := strings.SplitN(string(n), ":", 2)
|
|
||||||
if len(parts) > 1 {
|
|
||||||
return parts[1]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsUserDefined indicates user-created network
|
|
||||||
func (n NetworkMode) IsUserDefined() bool {
|
|
||||||
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
|
|
||||||
}
|
|
||||||
|
|
||||||
//UserDefined indicates user-created network
|
|
||||||
func (n NetworkMode) UserDefined() string {
|
|
||||||
if n.IsUserDefined() {
|
|
||||||
return string(n)
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
87
vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
generated
vendored
87
vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
generated
vendored
|
@ -1,87 +0,0 @@
|
||||||
package container
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsDefault indicates whether container uses the default network stack.
|
|
||||||
func (n NetworkMode) IsDefault() bool {
|
|
||||||
return n == "default"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNone indicates whether container isn't using a network stack.
|
|
||||||
func (n NetworkMode) IsNone() bool {
|
|
||||||
return n == "none"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsContainer indicates whether container uses a container network stack.
|
|
||||||
// Returns false as windows doesn't support this mode
|
|
||||||
func (n NetworkMode) IsContainer() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsBridge indicates whether container uses the bridge network stack
|
|
||||||
// in windows it is given the name NAT
|
|
||||||
func (n NetworkMode) IsBridge() bool {
|
|
||||||
return n == "nat"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsHost indicates whether container uses the host network stack.
|
|
||||||
// returns false as this is not supported by windows
|
|
||||||
func (n NetworkMode) IsHost() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPrivate indicates whether container uses its private network stack.
|
|
||||||
func (n NetworkMode) IsPrivate() bool {
|
|
||||||
return !(n.IsHost() || n.IsContainer())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnectedContainer is the id of the container which network this container is connected to.
|
|
||||||
// Returns blank string on windows
|
|
||||||
func (n NetworkMode) ConnectedContainer() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsUserDefined indicates user-created network
|
|
||||||
func (n NetworkMode) IsUserDefined() bool {
|
|
||||||
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsHyperV indicates the use of a Hyper-V partition for isolation
|
|
||||||
func (i Isolation) IsHyperV() bool {
|
|
||||||
return strings.ToLower(string(i)) == "hyperv"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsProcess indicates the use of process isolation
|
|
||||||
func (i Isolation) IsProcess() bool {
|
|
||||||
return strings.ToLower(string(i)) == "process"
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValid indicates if an isolation technology is valid
|
|
||||||
func (i Isolation) IsValid() bool {
|
|
||||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkName returns the name of the network stack.
|
|
||||||
func (n NetworkMode) NetworkName() string {
|
|
||||||
if n.IsDefault() {
|
|
||||||
return "default"
|
|
||||||
} else if n.IsBridge() {
|
|
||||||
return "nat"
|
|
||||||
} else if n.IsNone() {
|
|
||||||
return "none"
|
|
||||||
} else if n.IsUserDefined() {
|
|
||||||
return n.UserDefined()
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
//UserDefined indicates user-created network
|
|
||||||
func (n NetworkMode) UserDefined() string {
|
|
||||||
if n.IsUserDefined() {
|
|
||||||
return string(n)
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
13
vendor/github.com/docker/docker/api/types/error_response.go
generated
vendored
13
vendor/github.com/docker/docker/api/types/error_response.go
generated
vendored
|
@ -1,13 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// ErrorResponse Represents an error.
|
|
||||||
// swagger:model ErrorResponse
|
|
||||||
type ErrorResponse struct {
|
|
||||||
|
|
||||||
// The error message.
|
|
||||||
// Required: true
|
|
||||||
Message string `json:"message"`
|
|
||||||
}
|
|
310
vendor/github.com/docker/docker/api/types/filters/parse.go
generated
vendored
310
vendor/github.com/docker/docker/api/types/filters/parse.go
generated
vendored
|
@ -1,310 +0,0 @@
|
||||||
// Package filters provides helper function to parse and handle command line
|
|
||||||
// filter, used for example in docker ps or docker images commands.
|
|
||||||
package filters
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/versions"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Args stores filter arguments as map key:{map key: bool}.
|
|
||||||
// It contains an aggregation of the map of arguments (which are in the form
|
|
||||||
// of -f 'key=value') based on the key, and stores values for the same key
|
|
||||||
// in a map with string keys and boolean values.
|
|
||||||
// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
|
|
||||||
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
|
|
||||||
type Args struct {
|
|
||||||
fields map[string]map[string]bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewArgs initializes a new Args struct.
|
|
||||||
func NewArgs() Args {
|
|
||||||
return Args{fields: map[string]map[string]bool{}}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseFlag parses the argument to the filter flag. Like
|
|
||||||
//
|
|
||||||
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
|
|
||||||
//
|
|
||||||
// If prev map is provided, then it is appended to, and returned. By default a new
|
|
||||||
// map is created.
|
|
||||||
func ParseFlag(arg string, prev Args) (Args, error) {
|
|
||||||
filters := prev
|
|
||||||
if len(arg) == 0 {
|
|
||||||
return filters, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if !strings.Contains(arg, "=") {
|
|
||||||
return filters, ErrBadFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
f := strings.SplitN(arg, "=", 2)
|
|
||||||
|
|
||||||
name := strings.ToLower(strings.TrimSpace(f[0]))
|
|
||||||
value := strings.TrimSpace(f[1])
|
|
||||||
|
|
||||||
filters.Add(name, value)
|
|
||||||
|
|
||||||
return filters, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrBadFormat is an error returned in case of bad format for a filter.
|
|
||||||
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
|
|
||||||
|
|
||||||
// ToParam packs the Args into a string for easy transport from client to server.
|
|
||||||
func ToParam(a Args) (string, error) {
|
|
||||||
// this way we don't URL encode {}, just empty space
|
|
||||||
if a.Len() == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
buf, err := json.Marshal(a.fields)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToParamWithVersion packs the Args into a string for easy transport from client to server.
|
|
||||||
// The generated string will depend on the specified version (corresponding to the API version).
|
|
||||||
func ToParamWithVersion(version string, a Args) (string, error) {
|
|
||||||
// this way we don't URL encode {}, just empty space
|
|
||||||
if a.Len() == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// for daemons older than v1.10, filter must be of the form map[string][]string
|
|
||||||
buf := []byte{}
|
|
||||||
err := errors.New("")
|
|
||||||
if version != "" && versions.LessThan(version, "1.22") {
|
|
||||||
buf, err = json.Marshal(convertArgsToSlice(a.fields))
|
|
||||||
} else {
|
|
||||||
buf, err = json.Marshal(a.fields)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FromParam unpacks the filter Args.
|
|
||||||
func FromParam(p string) (Args, error) {
|
|
||||||
if len(p) == 0 {
|
|
||||||
return NewArgs(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
r := strings.NewReader(p)
|
|
||||||
d := json.NewDecoder(r)
|
|
||||||
|
|
||||||
m := map[string]map[string]bool{}
|
|
||||||
if err := d.Decode(&m); err != nil {
|
|
||||||
r.Seek(0, 0)
|
|
||||||
|
|
||||||
// Allow parsing old arguments in slice format.
|
|
||||||
// Because other libraries might be sending them in this format.
|
|
||||||
deprecated := map[string][]string{}
|
|
||||||
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
|
|
||||||
m = deprecatedArgs(deprecated)
|
|
||||||
} else {
|
|
||||||
return NewArgs(), err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Args{m}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the list of values associates with a field.
|
|
||||||
// It returns a slice of strings to keep backwards compatibility with old code.
|
|
||||||
func (filters Args) Get(field string) []string {
|
|
||||||
values := filters.fields[field]
|
|
||||||
if values == nil {
|
|
||||||
return make([]string, 0)
|
|
||||||
}
|
|
||||||
slice := make([]string, 0, len(values))
|
|
||||||
for key := range values {
|
|
||||||
slice = append(slice, key)
|
|
||||||
}
|
|
||||||
return slice
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds a new value to a filter field.
|
|
||||||
func (filters Args) Add(name, value string) {
|
|
||||||
if _, ok := filters.fields[name]; ok {
|
|
||||||
filters.fields[name][value] = true
|
|
||||||
} else {
|
|
||||||
filters.fields[name] = map[string]bool{value: true}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Del removes a value from a filter field.
|
|
||||||
func (filters Args) Del(name, value string) {
|
|
||||||
if _, ok := filters.fields[name]; ok {
|
|
||||||
delete(filters.fields[name], value)
|
|
||||||
if len(filters.fields[name]) == 0 {
|
|
||||||
delete(filters.fields, name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of fields in the arguments.
|
|
||||||
func (filters Args) Len() int {
|
|
||||||
return len(filters.fields)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MatchKVList returns true if the values for the specified field matches the ones
|
|
||||||
// from the sources.
|
|
||||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
|
||||||
// field is 'label' and sources are {'label1': '1', 'label2': '2'}
|
|
||||||
// it returns true.
|
|
||||||
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
|
||||||
fieldValues := filters.fields[field]
|
|
||||||
|
|
||||||
//do not filter if there is no filter set or cannot determine filter
|
|
||||||
if len(fieldValues) == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(sources) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for name2match := range fieldValues {
|
|
||||||
testKV := strings.SplitN(name2match, "=", 2)
|
|
||||||
|
|
||||||
v, ok := sources[testKV[0]]
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(testKV) == 2 && testKV[1] != v {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Match returns true if the values for the specified field matches the source string
|
|
||||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
|
||||||
// field is 'image.name' and source is 'ubuntu'
|
|
||||||
// it returns true.
|
|
||||||
func (filters Args) Match(field, source string) bool {
|
|
||||||
if filters.ExactMatch(field, source) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldValues := filters.fields[field]
|
|
||||||
for name2match := range fieldValues {
|
|
||||||
match, err := regexp.MatchString(name2match, source)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if match {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExactMatch returns true if the source matches exactly one of the filters.
|
|
||||||
func (filters Args) ExactMatch(field, source string) bool {
|
|
||||||
fieldValues, ok := filters.fields[field]
|
|
||||||
//do not filter if there is no filter set or cannot determine filter
|
|
||||||
if !ok || len(fieldValues) == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// try to match full name value to avoid O(N) regular expression matching
|
|
||||||
return fieldValues[source]
|
|
||||||
}
|
|
||||||
|
|
||||||
// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
|
|
||||||
func (filters Args) UniqueExactMatch(field, source string) bool {
|
|
||||||
fieldValues := filters.fields[field]
|
|
||||||
//do not filter if there is no filter set or cannot determine filter
|
|
||||||
if len(fieldValues) == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if len(filters.fields[field]) != 1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// try to match full name value to avoid O(N) regular expression matching
|
|
||||||
return fieldValues[source]
|
|
||||||
}
|
|
||||||
|
|
||||||
// FuzzyMatch returns true if the source matches exactly one of the filters,
|
|
||||||
// or the source has one of the filters as a prefix.
|
|
||||||
func (filters Args) FuzzyMatch(field, source string) bool {
|
|
||||||
if filters.ExactMatch(field, source) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldValues := filters.fields[field]
|
|
||||||
for prefix := range fieldValues {
|
|
||||||
if strings.HasPrefix(source, prefix) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Include returns true if the name of the field to filter is in the filters.
// Only the field's presence is checked, not any particular value under it.
func (filters Args) Include(field string) bool {
	_, ok := filters.fields[field]
	return ok
}
|
|
||||||
|
|
||||||
// Validate ensures that all the fields in the filter are valid.
|
|
||||||
// It returns an error as soon as it finds an invalid field.
|
|
||||||
func (filters Args) Validate(accepted map[string]bool) error {
|
|
||||||
for name := range filters.fields {
|
|
||||||
if !accepted[name] {
|
|
||||||
return fmt.Errorf("Invalid filter '%s'", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WalkValues iterates over the list of filtered values for a field.
|
|
||||||
// It stops the iteration if it finds an error and it returns that error.
|
|
||||||
func (filters Args) WalkValues(field string, op func(value string) error) error {
|
|
||||||
if _, ok := filters.fields[field]; !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for v := range filters.fields[field] {
|
|
||||||
if err := op(v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// deprecatedArgs converts the legacy map[string][]string filter format into
// the internal map[string]map[string]bool representation, marking every
// listed value as enabled.
func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
	converted := make(map[string]map[string]bool, len(d))
	for field, list := range d {
		set := make(map[string]bool, len(list))
		for _, item := range list {
			set[item] = true
		}
		converted[field] = set
	}
	return converted
}
|
|
||||||
|
|
||||||
// convertArgsToSlice flattens the internal map[string]map[string]bool filter
// representation back into map[string][]string, keeping only the values that
// are enabled (true). Every field maps to a non-nil (possibly empty) slice.
func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
	out := make(map[string][]string, len(f))
	for field, set := range f {
		values := []string{}
		for value, enabled := range set {
			if enabled {
				values = append(values, value)
			}
		}
		out[field] = values
	}
	return out
}
|
|
13
vendor/github.com/docker/docker/api/types/id_response.go
generated
vendored
13
vendor/github.com/docker/docker/api/types/id_response.go
generated
vendored
|
@ -1,13 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// IDResponse Response to an API call that returns just an Id
// swagger:model IdResponse
//
// NOTE: this type is generated by the swagger tool; manual edits will be
// overwritten on regeneration.
type IDResponse struct {

	// ID is the id of the newly created object.
	// Required: true
	ID string `json:"Id"`
}
|
|
49
vendor/github.com/docker/docker/api/types/image_summary.go
generated
vendored
49
vendor/github.com/docker/docker/api/types/image_summary.go
generated
vendored
|
@ -1,49 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// ImageSummary image summary
// swagger:model ImageSummary
//
// NOTE: this type is generated by the swagger tool; manual edits will be
// overwritten on regeneration.
type ImageSummary struct {

	// containers
	// Required: true
	Containers int64 `json:"Containers"`

	// created
	// Required: true
	Created int64 `json:"Created"`

	// Id
	// Required: true
	ID string `json:"Id"`

	// labels
	// Required: true
	Labels map[string]string `json:"Labels"`

	// parent Id
	// Required: true
	ParentID string `json:"ParentId"`

	// repo digests
	// Required: true
	RepoDigests []string `json:"RepoDigests"`

	// repo tags
	// Required: true
	RepoTags []string `json:"RepoTags"`

	// shared size
	// Required: true
	SharedSize int64 `json:"SharedSize"`

	// size
	// Required: true
	Size int64 `json:"Size"`

	// virtual size
	// Required: true
	VirtualSize int64 `json:"VirtualSize"`
}
|
|
103
vendor/github.com/docker/docker/api/types/mount/mount.go
generated
vendored
103
vendor/github.com/docker/docker/api/types/mount/mount.go
generated
vendored
|
@ -1,103 +0,0 @@
|
||||||
package mount
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Type represents the type of a mount.
type Type string

// Type constants
const (
	// TypeBind is the type for mounting host dir
	TypeBind Type = "bind"
	// TypeVolume is the type for remote storage volumes
	TypeVolume Type = "volume"
	// TypeTmpfs is the type for mounting tmpfs
	TypeTmpfs Type = "tmpfs"
)

// Mount represents a mount (volume).
type Mount struct {
	Type Type `json:",omitempty"`
	// Source specifies the name of the mount. Depending on mount type, this
	// may be a volume name or a host path, or even ignored.
	// Source is not supported for tmpfs (must be an empty value)
	Source   string `json:",omitempty"`
	Target   string `json:",omitempty"`
	ReadOnly bool   `json:",omitempty"`

	// At most one of the following option sets applies, matching Type.
	BindOptions   *BindOptions   `json:",omitempty"`
	VolumeOptions *VolumeOptions `json:",omitempty"`
	TmpfsOptions  *TmpfsOptions  `json:",omitempty"`
}

// Propagation represents the propagation of a mount.
type Propagation string

// Propagation constants
const (
	// PropagationRPrivate RPRIVATE
	PropagationRPrivate Propagation = "rprivate"
	// PropagationPrivate PRIVATE
	PropagationPrivate Propagation = "private"
	// PropagationRShared RSHARED
	PropagationRShared Propagation = "rshared"
	// PropagationShared SHARED
	PropagationShared Propagation = "shared"
	// PropagationRSlave RSLAVE
	PropagationRSlave Propagation = "rslave"
	// PropagationSlave SLAVE
	PropagationSlave Propagation = "slave"
)

// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
	Propagation Propagation `json:",omitempty"`
}

// VolumeOptions represents the options for a mount of type volume.
type VolumeOptions struct {
	NoCopy       bool              `json:",omitempty"`
	Labels       map[string]string `json:",omitempty"`
	DriverConfig *Driver           `json:",omitempty"`
}

// Driver represents a volume driver.
type Driver struct {
	Name    string            `json:",omitempty"`
	Options map[string]string `json:",omitempty"`
}

// TmpfsOptions defines options specific to mounts of type "tmpfs".
type TmpfsOptions struct {
	// Size sets the size of the tmpfs, in bytes.
	//
	// This will be converted to an operating system specific value
	// depending on the host. For example, on linux, it will be converted to
	// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
	// docker, uses a straight byte value.
	//
	// Percentages are not supported.
	SizeBytes int64 `json:",omitempty"`
	// Mode of the tmpfs upon creation
	Mode os.FileMode `json:",omitempty"`

	// TODO(stevvooe): There are several more tmpfs flags, specified in the
	// daemon, that are accepted. Only the most basic are added for now.
	//
	// From docker/docker/pkg/mount/flags.go:
	//
	// var validFlags = map[string]bool{
	// 	"":          true,
	// 	"size":      true, X
	// 	"mode":      true, X
	// 	"uid":       true,
	// 	"gid":       true,
	// 	"nr_inodes": true,
	// 	"nr_blocks": true,
	// 	"mpol":      true,
	// }
	//
	// Some of these may be straightforward to add, but others, such as
	// uid/gid have implications in a clustered system.
}
|
|
59
vendor/github.com/docker/docker/api/types/network/network.go
generated
vendored
59
vendor/github.com/docker/docker/api/types/network/network.go
generated
vendored
|
@ -1,59 +0,0 @@
|
||||||
package network
|
|
||||||
|
|
||||||
// Address represents an IP address
type Address struct {
	Addr      string
	PrefixLen int
}

// IPAM represents IP Address Management
type IPAM struct {
	Driver  string
	Options map[string]string // Per-network IPAM driver options
	Config  []IPAMConfig
}

// IPAMConfig represents IPAM configurations
type IPAMConfig struct {
	Subnet     string            `json:",omitempty"`
	IPRange    string            `json:",omitempty"`
	Gateway    string            `json:",omitempty"`
	AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
}

// EndpointIPAMConfig represents IPAM configurations for the endpoint
type EndpointIPAMConfig struct {
	IPv4Address  string   `json:",omitempty"`
	IPv6Address  string   `json:",omitempty"`
	LinkLocalIPs []string `json:",omitempty"`
}

// PeerInfo represents one peer of an overlay network
type PeerInfo struct {
	Name string
	IP   string
}

// EndpointSettings stores the network endpoint details
type EndpointSettings struct {
	// Configurations
	IPAMConfig *EndpointIPAMConfig
	Links      []string
	Aliases    []string
	// Operational data
	NetworkID           string
	EndpointID          string
	Gateway             string
	IPAddress           string
	IPPrefixLen         int
	IPv6Gateway         string
	GlobalIPv6Address   string
	GlobalIPv6PrefixLen int
	MacAddress          string
}

// NetworkingConfig represents the container's networking configuration for each of its interfaces
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
type NetworkingConfig struct {
	EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
|
|
186
vendor/github.com/docker/docker/api/types/plugin.go
generated
vendored
186
vendor/github.com/docker/docker/api/types/plugin.go
generated
vendored
|
@ -1,186 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// Plugin A plugin for the Engine API
// swagger:model Plugin
//
// NOTE: the types in this block are generated by the swagger tool; manual
// edits will be overwritten on regeneration.
type Plugin struct {

	// config
	// Required: true
	Config PluginConfig `json:"Config"`

	// True when the plugin is running. False when the plugin is not running, only installed.
	// Required: true
	Enabled bool `json:"Enabled"`

	// Id
	ID string `json:"Id,omitempty"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settings
	// Required: true
	Settings PluginSettings `json:"Settings"`
}

// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {

	// args
	// Required: true
	Args PluginConfigArgs `json:"Args"`

	// description
	// Required: true
	Description string `json:"Description"`

	// documentation
	// Required: true
	Documentation string `json:"Documentation"`

	// entrypoint
	// Required: true
	Entrypoint []string `json:"Entrypoint"`

	// env
	// Required: true
	Env []PluginEnv `json:"Env"`

	// interface
	// Required: true
	Interface PluginConfigInterface `json:"Interface"`

	// linux
	// Required: true
	Linux PluginConfigLinux `json:"Linux"`

	// mounts
	// Required: true
	Mounts []PluginMount `json:"Mounts"`

	// network
	// Required: true
	Network PluginConfigNetwork `json:"Network"`

	// propagated mount
	// Required: true
	PropagatedMount string `json:"PropagatedMount"`

	// user
	User PluginConfigUser `json:"User,omitempty"`

	// work dir
	// Required: true
	WorkDir string `json:"WorkDir"`

	// rootfs
	Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}

// PluginConfigArgs plugin config args
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// value
	// Required: true
	Value []string `json:"Value"`
}

// PluginConfigInterface The interface between Docker and the plugin
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {

	// socket
	// Required: true
	Socket string `json:"Socket"`

	// types
	// Required: true
	Types []PluginInterfaceType `json:"Types"`
}

// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {

	// allow all devices
	// Required: true
	AllowAllDevices bool `json:"AllowAllDevices"`

	// capabilities
	// Required: true
	Capabilities []string `json:"Capabilities"`

	// devices
	// Required: true
	Devices []PluginDevice `json:"Devices"`
}

// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {

	// type
	// Required: true
	Type string `json:"Type"`
}

// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {

	// diff ids
	DiffIds []string `json:"diff_ids"`

	// type
	Type string `json:"type,omitempty"`
}

// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {

	// g ID
	GID uint32 `json:"GID,omitempty"`

	// UID
	UID uint32 `json:"UID,omitempty"`
}

// PluginSettings Settings that can be modified by users.
// swagger:model PluginSettings
type PluginSettings struct {

	// args
	// Required: true
	Args []string `json:"Args"`

	// devices
	// Required: true
	Devices []PluginDevice `json:"Devices"`

	// env
	// Required: true
	Env []string `json:"Env"`

	// mounts
	// Required: true
	Mounts []PluginMount `json:"Mounts"`
}
|
|
25
vendor/github.com/docker/docker/api/types/plugin_device.go
generated
vendored
25
vendor/github.com/docker/docker/api/types/plugin_device.go
generated
vendored
|
@ -1,25 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// PluginDevice plugin device
// swagger:model PluginDevice
//
// NOTE: generated by the swagger tool; manual edits will be overwritten.
type PluginDevice struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// path
	// Required: true
	Path *string `json:"Path"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`
}
|
|
25
vendor/github.com/docker/docker/api/types/plugin_env.go
generated
vendored
25
vendor/github.com/docker/docker/api/types/plugin_env.go
generated
vendored
|
@ -1,25 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// PluginEnv plugin env
// swagger:model PluginEnv
//
// NOTE: generated by the swagger tool; manual edits will be overwritten.
type PluginEnv struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// value
	// Required: true
	Value *string `json:"Value"`
}
|
|
21
vendor/github.com/docker/docker/api/types/plugin_interface_type.go
generated
vendored
21
vendor/github.com/docker/docker/api/types/plugin_interface_type.go
generated
vendored
|
@ -1,21 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// PluginInterfaceType plugin interface type
// swagger:model PluginInterfaceType
//
// NOTE: generated by the swagger tool; manual edits will be overwritten.
type PluginInterfaceType struct {

	// capability
	// Required: true
	Capability string `json:"Capability"`

	// prefix
	// Required: true
	Prefix string `json:"Prefix"`

	// version
	// Required: true
	Version string `json:"Version"`
}
|
|
37
vendor/github.com/docker/docker/api/types/plugin_mount.go
generated
vendored
37
vendor/github.com/docker/docker/api/types/plugin_mount.go
generated
vendored
|
@ -1,37 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// PluginMount plugin mount
// swagger:model PluginMount
//
// NOTE: generated by the swagger tool; manual edits will be overwritten.
type PluginMount struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// destination
	// Required: true
	Destination string `json:"Destination"`

	// name
	// Required: true
	Name string `json:"Name"`

	// options
	// Required: true
	Options []string `json:"Options"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// source
	// Required: true
	Source *string `json:"Source"`

	// type
	// Required: true
	Type string `json:"Type"`
}
|
|
64
vendor/github.com/docker/docker/api/types/plugin_responses.go
generated
vendored
64
vendor/github.com/docker/docker/api/types/plugin_responses.go
generated
vendored
|
@ -1,64 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin

// Plugin driver name constants.
const (
	authzDriver   = "AuthzDriver"
	graphDriver   = "GraphDriver"
	ipamDriver    = "IpamDriver"
	networkDriver = "NetworkDriver"
	volumeDriver  = "VolumeDriver"
)
|
|
||||||
|
|
||||||
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
|
|
||||||
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
|
|
||||||
versionIndex := len(p)
|
|
||||||
prefixIndex := 0
|
|
||||||
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
|
|
||||||
return fmt.Errorf("%q is not a plugin interface type", p)
|
|
||||||
}
|
|
||||||
p = p[1 : len(p)-1]
|
|
||||||
loop:
|
|
||||||
for i, b := range p {
|
|
||||||
switch b {
|
|
||||||
case '.':
|
|
||||||
prefixIndex = i
|
|
||||||
case '/':
|
|
||||||
versionIndex = i
|
|
||||||
break loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.Prefix = string(p[:prefixIndex])
|
|
||||||
t.Capability = string(p[prefixIndex+1 : versionIndex])
|
|
||||||
if versionIndex < len(p) {
|
|
||||||
t.Version = string(p[versionIndex+1:])
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON implements json.Marshaler for PluginInterfaceType.
// The value is encoded as the JSON string produced by String.
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.String())
}

// String implements fmt.Stringer for PluginInterfaceType,
// rendering the type as "prefix.capability/version".
func (t PluginInterfaceType) String() string {
	return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
}
|
|
||||||
|
|
||||||
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
	Name        string
	Description string
	Value       []string
}

// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege
|
|
23
vendor/github.com/docker/docker/api/types/port.go
generated
vendored
23
vendor/github.com/docker/docker/api/types/port.go
generated
vendored
|
@ -1,23 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// Port An open port on a container
// swagger:model Port
//
// NOTE: generated by the swagger tool; manual edits will be overwritten.
type Port struct {

	// IP
	IP string `json:"IP,omitempty"`

	// Port on the container
	// Required: true
	PrivatePort uint16 `json:"PrivatePort"`

	// Port exposed on the host
	PublicPort uint16 `json:"PublicPort,omitempty"`

	// type
	// Required: true
	Type string `json:"Type"`
}
|
|
21
vendor/github.com/docker/docker/api/types/registry/authenticate.go
generated
vendored
21
vendor/github.com/docker/docker/api/types/registry/authenticate.go
generated
vendored
|
@ -1,21 +0,0 @@
|
||||||
package registry
|
|
||||||
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
// DO NOT EDIT THIS FILE
|
|
||||||
// This file was generated by `swagger generate operation`
|
|
||||||
//
|
|
||||||
// See hack/swagger-gen.sh
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
// AuthenticateOKBody authenticate o k body
// swagger:model AuthenticateOKBody
//
// NOTE: generated by `swagger generate operation`; manual edits will be
// overwritten (see hack/swagger-gen.sh).
type AuthenticateOKBody struct {

	// An opaque token used to authenticate a user after a successful login
	// Required: true
	IdentityToken string `json:"IdentityToken"`

	// The status of the authentication
	// Required: true
	Status string `json:"Status"`
}
|
|
104
vendor/github.com/docker/docker/api/types/registry/registry.go
generated
vendored
104
vendor/github.com/docker/docker/api/types/registry/registry.go
generated
vendored
|
@ -1,104 +0,0 @@
|
||||||
package registry
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
	// InsecureRegistryCIDRs lists registry networks reachable over plain
	// HTTP or with unverified TLS.
	InsecureRegistryCIDRs []*NetIPNet           `json:"InsecureRegistryCIDRs"`
	IndexConfigs          map[string]*IndexInfo `json:"IndexConfigs"`
	Mirrors               []string
}
|
|
||||||
|
|
||||||
// NetIPNet is the net.IPNet type, which can be marshalled and
// unmarshalled to JSON as its CIDR string form.
type NetIPNet net.IPNet

// String returns the CIDR notation of ipnet
func (ipnet *NetIPNet) String() string {
	return (*net.IPNet)(ipnet).String()
}

// MarshalJSON returns the JSON representation of the IPNet
// (a quoted CIDR string).
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
	return json.Marshal(ipnet.String())
}

// UnmarshalJSON sets the IPNet from a byte array of JSON
// containing a CIDR string.
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error {
	var ipnetStr string
	if err := json.Unmarshal(b, &ipnetStr); err != nil {
		return err
	}
	_, cidr, err := net.ParseCIDR(ipnetStr)
	if err != nil {
		return err
	}
	*ipnet = NetIPNet(*cidr)
	return nil
}
|
|
||||||
|
|
||||||
// IndexInfo contains information about a registry
//
// RepositoryInfo Examples:
// {
//   "Index" : {
//     "Name" : "docker.io",
//     "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
//     "Secure" : true,
//     "Official" : true,
//   },
//   "RemoteName" : "library/debian",
//   "LocalName" : "debian",
//   "CanonicalName" : "docker.io/debian"
//   "Official" : true,
// }
//
// {
//   "Index" : {
//     "Name" : "127.0.0.1:5000",
//     "Mirrors" : [],
//     "Secure" : false,
//     "Official" : false,
//   },
//   "RemoteName" : "user/repo",
//   "LocalName" : "127.0.0.1:5000/user/repo",
//   "CanonicalName" : "127.0.0.1:5000/user/repo",
//   "Official" : false,
// }
type IndexInfo struct {
	// Name is the name of the registry, such as "docker.io"
	Name string
	// Mirrors is a list of mirrors, expressed as URIs
	Mirrors []string
	// Secure is set to false if the registry is part of the list of
	// insecure registries. Insecure registries accept HTTP and/or accept
	// HTTPS with certificates from unknown CAs.
	Secure bool
	// Official indicates whether this is an official registry
	Official bool
}
|
|
||||||
|
|
||||||
// SearchResult describes a search result returned from a registry
type SearchResult struct {
	// StarCount indicates the number of stars this repository has
	StarCount int `json:"star_count"`
	// IsOfficial is true if the result is from an official repository.
	IsOfficial bool `json:"is_official"`
	// Name is the name of the repository
	Name string `json:"name"`
	// IsAutomated indicates whether the result is automated
	IsAutomated bool `json:"is_automated"`
	// Description is a textual description of the repository
	Description string `json:"description"`
}

// SearchResults lists a collection of search results returned from a registry
type SearchResults struct {
	// Query contains the query string that generated the search results
	Query string `json:"query"`
	// NumResults indicates the number of results the query returned
	NumResults int `json:"num_results"`
	// Results is a slice containing the actual results for the search
	Results []SearchResult `json:"results"`
}
|
|
93
vendor/github.com/docker/docker/api/types/seccomp.go
generated
vendored
93
vendor/github.com/docker/docker/api/types/seccomp.go
generated
vendored
|
@ -1,93 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
	DefaultAction Action `json:"defaultAction"`
	// Architectures is kept to maintain backward compatibility with the old
	// seccomp profile.
	Architectures []Arch         `json:"architectures,omitempty"`
	ArchMap       []Architecture `json:"archMap,omitempty"`
	Syscalls      []*Syscall     `json:"syscalls"`
}

// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
	Arch      Arch   `json:"architecture"`
	SubArches []Arch `json:"subArchitectures"`
}

// Arch used for architectures
type Arch string

// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
	ArchX86         Arch = "SCMP_ARCH_X86"
	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
	ArchX32         Arch = "SCMP_ARCH_X32"
	ArchARM         Arch = "SCMP_ARCH_ARM"
	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
	ArchPPC         Arch = "SCMP_ARCH_PPC"
	ArchPPC64       Arch = "SCMP_ARCH_PPC64"
	ArchPPC64LE     Arch = "SCMP_ARCH_PPC64LE"
	ArchS390        Arch = "SCMP_ARCH_S390"
	ArchS390X       Arch = "SCMP_ARCH_S390X"
)

// Action taken upon Seccomp rule match
type Action string

// Define actions for Seccomp rules
const (
	ActKill  Action = "SCMP_ACT_KILL"
	ActTrap  Action = "SCMP_ACT_TRAP"
	ActErrno Action = "SCMP_ACT_ERRNO"
	ActTrace Action = "SCMP_ACT_TRACE"
	ActAllow Action = "SCMP_ACT_ALLOW"
)
|
|
||||||
|
|
||||||
// Operator used to match syscall arguments in Seccomp
|
|
||||||
type Operator string
|
|
||||||
|
|
||||||
// Define operators for syscall arguments in Seccomp
|
|
||||||
const (
|
|
||||||
OpNotEqual Operator = "SCMP_CMP_NE"
|
|
||||||
OpLessThan Operator = "SCMP_CMP_LT"
|
|
||||||
OpLessEqual Operator = "SCMP_CMP_LE"
|
|
||||||
OpEqualTo Operator = "SCMP_CMP_EQ"
|
|
||||||
OpGreaterEqual Operator = "SCMP_CMP_GE"
|
|
||||||
OpGreaterThan Operator = "SCMP_CMP_GT"
|
|
||||||
OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Arg used for matching specific syscall arguments in Seccomp
|
|
||||||
type Arg struct {
|
|
||||||
Index uint `json:"index"`
|
|
||||||
Value uint64 `json:"value"`
|
|
||||||
ValueTwo uint64 `json:"valueTwo"`
|
|
||||||
Op Operator `json:"op"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter is used to conditionally apply Seccomp rules
|
|
||||||
type Filter struct {
|
|
||||||
Caps []string `json:"caps,omitempty"`
|
|
||||||
Arches []string `json:"arches,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Syscall is used to match a group of syscalls in Seccomp
|
|
||||||
type Syscall struct {
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
Names []string `json:"names,omitempty"`
|
|
||||||
Action Action `json:"action"`
|
|
||||||
Args []*Arg `json:"args"`
|
|
||||||
Comment string `json:"comment"`
|
|
||||||
Includes Filter `json:"includes"`
|
|
||||||
Excludes Filter `json:"excludes"`
|
|
||||||
}
|
|
12
vendor/github.com/docker/docker/api/types/service_update_response.go
generated
vendored
12
vendor/github.com/docker/docker/api/types/service_update_response.go
generated
vendored
|
@ -1,12 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// ServiceUpdateResponse service update response
|
|
||||||
// swagger:model ServiceUpdateResponse
|
|
||||||
type ServiceUpdateResponse struct {
|
|
||||||
|
|
||||||
// Optional warning messages
|
|
||||||
Warnings []string `json:"Warnings"`
|
|
||||||
}
|
|
178
vendor/github.com/docker/docker/api/types/stats.go
generated
vendored
178
vendor/github.com/docker/docker/api/types/stats.go
generated
vendored
|
@ -1,178 +0,0 @@
|
||||||
// Package types is used for API stability in the types and response to the
|
|
||||||
// consumers of the API stats endpoint.
|
|
||||||
package types
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// ThrottlingData stores CPU throttling stats of one running container.
|
|
||||||
// Not used on Windows.
|
|
||||||
type ThrottlingData struct {
|
|
||||||
// Number of periods with throttling active
|
|
||||||
Periods uint64 `json:"periods"`
|
|
||||||
// Number of periods when the container hits its throttling limit.
|
|
||||||
ThrottledPeriods uint64 `json:"throttled_periods"`
|
|
||||||
// Aggregate time the container was throttled for in nanoseconds.
|
|
||||||
ThrottledTime uint64 `json:"throttled_time"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
|
||||||
type CPUUsage struct {
|
|
||||||
// Total CPU time consumed.
|
|
||||||
// Units: nanoseconds (Linux)
|
|
||||||
// Units: 100's of nanoseconds (Windows)
|
|
||||||
TotalUsage uint64 `json:"total_usage"`
|
|
||||||
|
|
||||||
// Total CPU time consumed per core (Linux). Not used on Windows.
|
|
||||||
// Units: nanoseconds.
|
|
||||||
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
|
|
||||||
|
|
||||||
// Time spent by tasks of the cgroup in kernel mode (Linux).
|
|
||||||
// Time spent by all container processes in kernel mode (Windows).
|
|
||||||
// Units: nanoseconds (Linux).
|
|
||||||
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
|
|
||||||
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
|
|
||||||
|
|
||||||
// Time spent by tasks of the cgroup in user mode (Linux).
|
|
||||||
// Time spent by all container processes in user mode (Windows).
|
|
||||||
// Units: nanoseconds (Linux).
|
|
||||||
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
|
|
||||||
UsageInUsermode uint64 `json:"usage_in_usermode"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CPUStats aggregates and wraps all CPU related info of container
|
|
||||||
type CPUStats struct {
|
|
||||||
// CPU Usage. Linux and Windows.
|
|
||||||
CPUUsage CPUUsage `json:"cpu_usage"`
|
|
||||||
|
|
||||||
// System Usage. Linux only.
|
|
||||||
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
|
|
||||||
|
|
||||||
// Throttling Data. Linux only.
|
|
||||||
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemoryStats aggregates all memory stats since container inception on Linux.
|
|
||||||
// Windows returns stats for commit and private working set only.
|
|
||||||
type MemoryStats struct {
|
|
||||||
// Linux Memory Stats
|
|
||||||
|
|
||||||
// current res_counter usage for memory
|
|
||||||
Usage uint64 `json:"usage,omitempty"`
|
|
||||||
// maximum usage ever recorded.
|
|
||||||
MaxUsage uint64 `json:"max_usage,omitempty"`
|
|
||||||
// TODO(vishh): Export these as stronger types.
|
|
||||||
// all the stats exported via memory.stat.
|
|
||||||
Stats map[string]uint64 `json:"stats,omitempty"`
|
|
||||||
// number of times memory usage hits limits.
|
|
||||||
Failcnt uint64 `json:"failcnt,omitempty"`
|
|
||||||
Limit uint64 `json:"limit,omitempty"`
|
|
||||||
|
|
||||||
// Windows Memory Stats
|
|
||||||
// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
|
|
||||||
|
|
||||||
// committed bytes
|
|
||||||
Commit uint64 `json:"commitbytes,omitempty"`
|
|
||||||
// peak committed bytes
|
|
||||||
CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
|
|
||||||
// private working set
|
|
||||||
PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
|
||||||
// Not used on Windows.
|
|
||||||
type BlkioStatEntry struct {
|
|
||||||
Major uint64 `json:"major"`
|
|
||||||
Minor uint64 `json:"minor"`
|
|
||||||
Op string `json:"op"`
|
|
||||||
Value uint64 `json:"value"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlkioStats stores All IO service stats for data read and write.
|
|
||||||
// This is a Linux specific structure as the differences between expressing
|
|
||||||
// block I/O on Windows and Linux are sufficiently significant to make
|
|
||||||
// little sense attempting to morph into a combined structure.
|
|
||||||
type BlkioStats struct {
|
|
||||||
// number of bytes transferred to and from the block device
|
|
||||||
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
|
|
||||||
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
|
|
||||||
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
|
|
||||||
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
|
|
||||||
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
|
|
||||||
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
|
|
||||||
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
|
|
||||||
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// StorageStats is the disk I/O stats for read/write on Windows.
|
|
||||||
type StorageStats struct {
|
|
||||||
ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
|
|
||||||
ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
|
|
||||||
WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
|
|
||||||
WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkStats aggregates the network stats of one container
|
|
||||||
type NetworkStats struct {
|
|
||||||
// Bytes received. Windows and Linux.
|
|
||||||
RxBytes uint64 `json:"rx_bytes"`
|
|
||||||
// Packets received. Windows and Linux.
|
|
||||||
RxPackets uint64 `json:"rx_packets"`
|
|
||||||
// Received errors. Not used on Windows. Note that we dont `omitempty` this
|
|
||||||
// field as it is expected in the >=v1.21 API stats structure.
|
|
||||||
RxErrors uint64 `json:"rx_errors"`
|
|
||||||
// Incoming packets dropped. Windows and Linux.
|
|
||||||
RxDropped uint64 `json:"rx_dropped"`
|
|
||||||
// Bytes sent. Windows and Linux.
|
|
||||||
TxBytes uint64 `json:"tx_bytes"`
|
|
||||||
// Packets sent. Windows and Linux.
|
|
||||||
TxPackets uint64 `json:"tx_packets"`
|
|
||||||
// Sent errors. Not used on Windows. Note that we dont `omitempty` this
|
|
||||||
// field as it is expected in the >=v1.21 API stats structure.
|
|
||||||
TxErrors uint64 `json:"tx_errors"`
|
|
||||||
// Outgoing packets dropped. Windows and Linux.
|
|
||||||
TxDropped uint64 `json:"tx_dropped"`
|
|
||||||
// Endpoint ID. Not used on Linux.
|
|
||||||
EndpointID string `json:"endpoint_id,omitempty"`
|
|
||||||
// Instance ID. Not used on Linux.
|
|
||||||
InstanceID string `json:"instance_id,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PidsStats contains the stats of a container's pids
|
|
||||||
type PidsStats struct {
|
|
||||||
// Current is the number of pids in the cgroup
|
|
||||||
Current uint64 `json:"current,omitempty"`
|
|
||||||
// Limit is the hard limit on the number of pids in the cgroup.
|
|
||||||
// A "Limit" of 0 means that there is no limit.
|
|
||||||
Limit uint64 `json:"limit,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stats is Ultimate struct aggregating all types of stats of one container
|
|
||||||
type Stats struct {
|
|
||||||
// Common stats
|
|
||||||
Read time.Time `json:"read"`
|
|
||||||
PreRead time.Time `json:"preread"`
|
|
||||||
|
|
||||||
// Linux specific stats, not populated on Windows.
|
|
||||||
PidsStats PidsStats `json:"pids_stats,omitempty"`
|
|
||||||
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
|
|
||||||
|
|
||||||
// Windows specific stats, not populated on Linux.
|
|
||||||
NumProcs uint32 `json:"num_procs"`
|
|
||||||
StorageStats StorageStats `json:"storage_stats,omitempty"`
|
|
||||||
|
|
||||||
// Shared stats
|
|
||||||
CPUStats CPUStats `json:"cpu_stats,omitempty"`
|
|
||||||
PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
|
|
||||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// StatsJSON is newly used Networks
|
|
||||||
type StatsJSON struct {
|
|
||||||
Stats
|
|
||||||
|
|
||||||
Name string `json:"name,omitempty"`
|
|
||||||
ID string `json:"id,omitempty"`
|
|
||||||
|
|
||||||
// Networks request version >=1.21
|
|
||||||
Networks map[string]NetworkStats `json:"networks,omitempty"`
|
|
||||||
}
|
|
30
vendor/github.com/docker/docker/api/types/strslice/strslice.go
generated
vendored
30
vendor/github.com/docker/docker/api/types/strslice/strslice.go
generated
vendored
|
@ -1,30 +0,0 @@
|
||||||
package strslice
|
|
||||||
|
|
||||||
import "encoding/json"
|
|
||||||
|
|
||||||
// StrSlice represents a string or an array of strings.
|
|
||||||
// We need to override the json decoder to accept both options.
|
|
||||||
type StrSlice []string
|
|
||||||
|
|
||||||
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
|
|
||||||
// strings. This method is needed to implement json.Unmarshaler.
|
|
||||||
func (e *StrSlice) UnmarshalJSON(b []byte) error {
|
|
||||||
if len(b) == 0 {
|
|
||||||
// With no input, we preserve the existing value by returning nil and
|
|
||||||
// leaving the target alone. This allows defining default values for
|
|
||||||
// the type.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
p := make([]string, 0, 1)
|
|
||||||
if err := json.Unmarshal(b, &p); err != nil {
|
|
||||||
var s string
|
|
||||||
if err := json.Unmarshal(b, &s); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
p = append(p, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
*e = p
|
|
||||||
return nil
|
|
||||||
}
|
|
27
vendor/github.com/docker/docker/api/types/swarm/common.go
generated
vendored
27
vendor/github.com/docker/docker/api/types/swarm/common.go
generated
vendored
|
@ -1,27 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// Version represents the internal object version.
|
|
||||||
type Version struct {
|
|
||||||
Index uint64 `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Meta is a base object inherited by most of the other once.
|
|
||||||
type Meta struct {
|
|
||||||
Version Version `json:",omitempty"`
|
|
||||||
CreatedAt time.Time `json:",omitempty"`
|
|
||||||
UpdatedAt time.Time `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Annotations represents how to describe an object.
|
|
||||||
type Annotations struct {
|
|
||||||
Name string `json:",omitempty"`
|
|
||||||
Labels map[string]string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Driver represents a driver (network, logging).
|
|
||||||
type Driver struct {
|
|
||||||
Name string `json:",omitempty"`
|
|
||||||
Options map[string]string `json:",omitempty"`
|
|
||||||
}
|
|
46
vendor/github.com/docker/docker/api/types/swarm/container.go
generated
vendored
46
vendor/github.com/docker/docker/api/types/swarm/container.go
generated
vendored
|
@ -1,46 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/api/types/mount"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
|
|
||||||
// Detailed documentation is available in:
|
|
||||||
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
|
|
||||||
// `nameserver`, `search`, `options` have been supported.
|
|
||||||
// TODO: `domain` is not supported yet.
|
|
||||||
type DNSConfig struct {
|
|
||||||
// Nameservers specifies the IP addresses of the name servers
|
|
||||||
Nameservers []string `json:",omitempty"`
|
|
||||||
// Search specifies the search list for host-name lookup
|
|
||||||
Search []string `json:",omitempty"`
|
|
||||||
// Options allows certain internal resolver variables to be modified
|
|
||||||
Options []string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerSpec represents the spec of a container.
|
|
||||||
type ContainerSpec struct {
|
|
||||||
Image string `json:",omitempty"`
|
|
||||||
Labels map[string]string `json:",omitempty"`
|
|
||||||
Command []string `json:",omitempty"`
|
|
||||||
Args []string `json:",omitempty"`
|
|
||||||
Hostname string `json:",omitempty"`
|
|
||||||
Env []string `json:",omitempty"`
|
|
||||||
Dir string `json:",omitempty"`
|
|
||||||
User string `json:",omitempty"`
|
|
||||||
Groups []string `json:",omitempty"`
|
|
||||||
TTY bool `json:",omitempty"`
|
|
||||||
OpenStdin bool `json:",omitempty"`
|
|
||||||
Mounts []mount.Mount `json:",omitempty"`
|
|
||||||
StopGracePeriod *time.Duration `json:",omitempty"`
|
|
||||||
Healthcheck *container.HealthConfig `json:",omitempty"`
|
|
||||||
// The format of extra hosts on swarmkit is specified in:
|
|
||||||
// http://man7.org/linux/man-pages/man5/hosts.5.html
|
|
||||||
// IP_address canonical_hostname [aliases...]
|
|
||||||
Hosts []string `json:",omitempty"`
|
|
||||||
DNSConfig *DNSConfig `json:",omitempty"`
|
|
||||||
Secrets []*SecretReference `json:",omitempty"`
|
|
||||||
}
|
|
111
vendor/github.com/docker/docker/api/types/swarm/network.go
generated
vendored
111
vendor/github.com/docker/docker/api/types/swarm/network.go
generated
vendored
|
@ -1,111 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
// Endpoint represents an endpoint.
|
|
||||||
type Endpoint struct {
|
|
||||||
Spec EndpointSpec `json:",omitempty"`
|
|
||||||
Ports []PortConfig `json:",omitempty"`
|
|
||||||
VirtualIPs []EndpointVirtualIP `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// EndpointSpec represents the spec of an endpoint.
|
|
||||||
type EndpointSpec struct {
|
|
||||||
Mode ResolutionMode `json:",omitempty"`
|
|
||||||
Ports []PortConfig `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolutionMode represents a resolution mode.
|
|
||||||
type ResolutionMode string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ResolutionModeVIP VIP
|
|
||||||
ResolutionModeVIP ResolutionMode = "vip"
|
|
||||||
// ResolutionModeDNSRR DNSRR
|
|
||||||
ResolutionModeDNSRR ResolutionMode = "dnsrr"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PortConfig represents the config of a port.
|
|
||||||
type PortConfig struct {
|
|
||||||
Name string `json:",omitempty"`
|
|
||||||
Protocol PortConfigProtocol `json:",omitempty"`
|
|
||||||
// TargetPort is the port inside the container
|
|
||||||
TargetPort uint32 `json:",omitempty"`
|
|
||||||
// PublishedPort is the port on the swarm hosts
|
|
||||||
PublishedPort uint32 `json:",omitempty"`
|
|
||||||
// PublishMode is the mode in which port is published
|
|
||||||
PublishMode PortConfigPublishMode `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PortConfigPublishMode represents the mode in which the port is to
|
|
||||||
// be published.
|
|
||||||
type PortConfigPublishMode string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// PortConfigPublishModeIngress is used for ports published
|
|
||||||
// for ingress load balancing using routing mesh.
|
|
||||||
PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
|
|
||||||
// PortConfigPublishModeHost is used for ports published
|
|
||||||
// for direct host level access on the host where the task is running.
|
|
||||||
PortConfigPublishModeHost PortConfigPublishMode = "host"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PortConfigProtocol represents the protocol of a port.
|
|
||||||
type PortConfigProtocol string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// TODO(stevvooe): These should be used generally, not just for PortConfig.
|
|
||||||
|
|
||||||
// PortConfigProtocolTCP TCP
|
|
||||||
PortConfigProtocolTCP PortConfigProtocol = "tcp"
|
|
||||||
// PortConfigProtocolUDP UDP
|
|
||||||
PortConfigProtocolUDP PortConfigProtocol = "udp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EndpointVirtualIP represents the virtual ip of a port.
|
|
||||||
type EndpointVirtualIP struct {
|
|
||||||
NetworkID string `json:",omitempty"`
|
|
||||||
Addr string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Network represents a network.
|
|
||||||
type Network struct {
|
|
||||||
ID string
|
|
||||||
Meta
|
|
||||||
Spec NetworkSpec `json:",omitempty"`
|
|
||||||
DriverState Driver `json:",omitempty"`
|
|
||||||
IPAMOptions *IPAMOptions `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkSpec represents the spec of a network.
|
|
||||||
type NetworkSpec struct {
|
|
||||||
Annotations
|
|
||||||
DriverConfiguration *Driver `json:",omitempty"`
|
|
||||||
IPv6Enabled bool `json:",omitempty"`
|
|
||||||
Internal bool `json:",omitempty"`
|
|
||||||
Attachable bool `json:",omitempty"`
|
|
||||||
IPAMOptions *IPAMOptions `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkAttachmentConfig represents the configuration of a network attachment.
|
|
||||||
type NetworkAttachmentConfig struct {
|
|
||||||
Target string `json:",omitempty"`
|
|
||||||
Aliases []string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkAttachment represents a network attachment.
|
|
||||||
type NetworkAttachment struct {
|
|
||||||
Network Network `json:",omitempty"`
|
|
||||||
Addresses []string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPAMOptions represents ipam options.
|
|
||||||
type IPAMOptions struct {
|
|
||||||
Driver Driver `json:",omitempty"`
|
|
||||||
Configs []IPAMConfig `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPAMConfig represents ipam configuration.
|
|
||||||
type IPAMConfig struct {
|
|
||||||
Subnet string `json:",omitempty"`
|
|
||||||
Range string `json:",omitempty"`
|
|
||||||
Gateway string `json:",omitempty"`
|
|
||||||
}
|
|
114
vendor/github.com/docker/docker/api/types/swarm/node.go
generated
vendored
114
vendor/github.com/docker/docker/api/types/swarm/node.go
generated
vendored
|
@ -1,114 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
// Node represents a node.
|
|
||||||
type Node struct {
|
|
||||||
ID string
|
|
||||||
Meta
|
|
||||||
// Spec defines the desired state of the node as specified by the user.
|
|
||||||
// The system will honor this and will *never* modify it.
|
|
||||||
Spec NodeSpec `json:",omitempty"`
|
|
||||||
// Description encapsulates the properties of the Node as reported by the
|
|
||||||
// agent.
|
|
||||||
Description NodeDescription `json:",omitempty"`
|
|
||||||
// Status provides the current status of the node, as seen by the manager.
|
|
||||||
Status NodeStatus `json:",omitempty"`
|
|
||||||
// ManagerStatus provides the current status of the node's manager
|
|
||||||
// component, if the node is a manager.
|
|
||||||
ManagerStatus *ManagerStatus `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeSpec represents the spec of a node.
|
|
||||||
type NodeSpec struct {
|
|
||||||
Annotations
|
|
||||||
Role NodeRole `json:",omitempty"`
|
|
||||||
Availability NodeAvailability `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeRole represents the role of a node.
|
|
||||||
type NodeRole string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// NodeRoleWorker WORKER
|
|
||||||
NodeRoleWorker NodeRole = "worker"
|
|
||||||
// NodeRoleManager MANAGER
|
|
||||||
NodeRoleManager NodeRole = "manager"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NodeAvailability represents the availability of a node.
|
|
||||||
type NodeAvailability string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// NodeAvailabilityActive ACTIVE
|
|
||||||
NodeAvailabilityActive NodeAvailability = "active"
|
|
||||||
// NodeAvailabilityPause PAUSE
|
|
||||||
NodeAvailabilityPause NodeAvailability = "pause"
|
|
||||||
// NodeAvailabilityDrain DRAIN
|
|
||||||
NodeAvailabilityDrain NodeAvailability = "drain"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NodeDescription represents the description of a node.
|
|
||||||
type NodeDescription struct {
|
|
||||||
Hostname string `json:",omitempty"`
|
|
||||||
Platform Platform `json:",omitempty"`
|
|
||||||
Resources Resources `json:",omitempty"`
|
|
||||||
Engine EngineDescription `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Platform represents the platform (Arch/OS).
|
|
||||||
type Platform struct {
|
|
||||||
Architecture string `json:",omitempty"`
|
|
||||||
OS string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// EngineDescription represents the description of an engine.
|
|
||||||
type EngineDescription struct {
|
|
||||||
EngineVersion string `json:",omitempty"`
|
|
||||||
Labels map[string]string `json:",omitempty"`
|
|
||||||
Plugins []PluginDescription `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginDescription represents the description of an engine plugin.
|
|
||||||
type PluginDescription struct {
|
|
||||||
Type string `json:",omitempty"`
|
|
||||||
Name string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeStatus represents the status of a node.
|
|
||||||
type NodeStatus struct {
|
|
||||||
State NodeState `json:",omitempty"`
|
|
||||||
Message string `json:",omitempty"`
|
|
||||||
Addr string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reachability represents the reachability of a node.
|
|
||||||
type Reachability string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ReachabilityUnknown UNKNOWN
|
|
||||||
ReachabilityUnknown Reachability = "unknown"
|
|
||||||
// ReachabilityUnreachable UNREACHABLE
|
|
||||||
ReachabilityUnreachable Reachability = "unreachable"
|
|
||||||
// ReachabilityReachable REACHABLE
|
|
||||||
ReachabilityReachable Reachability = "reachable"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ManagerStatus represents the status of a manager.
|
|
||||||
type ManagerStatus struct {
|
|
||||||
Leader bool `json:",omitempty"`
|
|
||||||
Reachability Reachability `json:",omitempty"`
|
|
||||||
Addr string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NodeState represents the state of a node.
|
|
||||||
type NodeState string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// NodeStateUnknown UNKNOWN
|
|
||||||
NodeStateUnknown NodeState = "unknown"
|
|
||||||
// NodeStateDown DOWN
|
|
||||||
NodeStateDown NodeState = "down"
|
|
||||||
// NodeStateReady READY
|
|
||||||
NodeStateReady NodeState = "ready"
|
|
||||||
// NodeStateDisconnected DISCONNECTED
|
|
||||||
NodeStateDisconnected NodeState = "disconnected"
|
|
||||||
)
|
|
31
vendor/github.com/docker/docker/api/types/swarm/secret.go
generated
vendored
31
vendor/github.com/docker/docker/api/types/swarm/secret.go
generated
vendored
|
@ -1,31 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
import "os"
|
|
||||||
|
|
||||||
// Secret represents a secret.
|
|
||||||
type Secret struct {
|
|
||||||
ID string
|
|
||||||
Meta
|
|
||||||
Spec SecretSpec
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecretSpec represents a secret specification from a secret in swarm
|
|
||||||
type SecretSpec struct {
|
|
||||||
Annotations
|
|
||||||
Data []byte `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecretReferenceFileTarget is a file target in a secret reference
|
|
||||||
type SecretReferenceFileTarget struct {
|
|
||||||
Name string
|
|
||||||
UID string
|
|
||||||
GID string
|
|
||||||
Mode os.FileMode
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecretReference is a reference to a secret in swarm
|
|
||||||
type SecretReference struct {
|
|
||||||
File *SecretReferenceFileTarget
|
|
||||||
SecretID string
|
|
||||||
SecretName string
|
|
||||||
}
|
|
105
vendor/github.com/docker/docker/api/types/swarm/service.go
generated
vendored
105
vendor/github.com/docker/docker/api/types/swarm/service.go
generated
vendored
|
@ -1,105 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// Service represents a service.
|
|
||||||
type Service struct {
|
|
||||||
ID string
|
|
||||||
Meta
|
|
||||||
Spec ServiceSpec `json:",omitempty"`
|
|
||||||
PreviousSpec *ServiceSpec `json:",omitempty"`
|
|
||||||
Endpoint Endpoint `json:",omitempty"`
|
|
||||||
UpdateStatus UpdateStatus `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServiceSpec represents the spec of a service.
|
|
||||||
type ServiceSpec struct {
|
|
||||||
Annotations
|
|
||||||
|
|
||||||
// TaskTemplate defines how the service should construct new tasks when
|
|
||||||
// orchestrating this service.
|
|
||||||
TaskTemplate TaskSpec `json:",omitempty"`
|
|
||||||
Mode ServiceMode `json:",omitempty"`
|
|
||||||
UpdateConfig *UpdateConfig `json:",omitempty"`
|
|
||||||
|
|
||||||
// Networks field in ServiceSpec is deprecated. The
|
|
||||||
// same field in TaskSpec should be used instead.
|
|
||||||
// This field will be removed in a future release.
|
|
||||||
Networks []NetworkAttachmentConfig `json:",omitempty"`
|
|
||||||
EndpointSpec *EndpointSpec `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServiceMode represents the mode of a service.
|
|
||||||
type ServiceMode struct {
|
|
||||||
Replicated *ReplicatedService `json:",omitempty"`
|
|
||||||
Global *GlobalService `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateState is the state of a service update.
|
|
||||||
type UpdateState string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UpdateStateUpdating is the updating state.
|
|
||||||
UpdateStateUpdating UpdateState = "updating"
|
|
||||||
// UpdateStatePaused is the paused state.
|
|
||||||
UpdateStatePaused UpdateState = "paused"
|
|
||||||
// UpdateStateCompleted is the completed state.
|
|
||||||
UpdateStateCompleted UpdateState = "completed"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UpdateStatus reports the status of a service update.
|
|
||||||
type UpdateStatus struct {
|
|
||||||
State UpdateState `json:",omitempty"`
|
|
||||||
StartedAt time.Time `json:",omitempty"`
|
|
||||||
CompletedAt time.Time `json:",omitempty"`
|
|
||||||
Message string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReplicatedService is a kind of ServiceMode.
|
|
||||||
type ReplicatedService struct {
|
|
||||||
Replicas *uint64 `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GlobalService is a kind of ServiceMode.
|
|
||||||
type GlobalService struct{}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UpdateFailureActionPause PAUSE
|
|
||||||
UpdateFailureActionPause = "pause"
|
|
||||||
// UpdateFailureActionContinue CONTINUE
|
|
||||||
UpdateFailureActionContinue = "continue"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UpdateConfig represents the update configuration.
|
|
||||||
type UpdateConfig struct {
|
|
||||||
// Maximum number of tasks to be updated in one iteration.
|
|
||||||
// 0 means unlimited parallelism.
|
|
||||||
Parallelism uint64
|
|
||||||
|
|
||||||
// Amount of time between updates.
|
|
||||||
Delay time.Duration `json:",omitempty"`
|
|
||||||
|
|
||||||
// FailureAction is the action to take when an update failures.
|
|
||||||
FailureAction string `json:",omitempty"`
|
|
||||||
|
|
||||||
// Monitor indicates how long to monitor a task for failure after it is
|
|
||||||
// created. If the task fails by ending up in one of the states
|
|
||||||
// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
|
|
||||||
// this counts as a failure. If it fails after Monitor, it does not
|
|
||||||
// count as a failure. If Monitor is unspecified, a default value will
|
|
||||||
// be used.
|
|
||||||
Monitor time.Duration `json:",omitempty"`
|
|
||||||
|
|
||||||
// MaxFailureRatio is the fraction of tasks that may fail during
|
|
||||||
// an update before the failure action is invoked. Any task created by
|
|
||||||
// the current update which ends up in one of the states REJECTED,
|
|
||||||
// COMPLETED or FAILED within Monitor from its creation counts as a
|
|
||||||
// failure. The number of failures is divided by the number of tasks
|
|
||||||
// being updated, and if this fraction is greater than
|
|
||||||
// MaxFailureRatio, the failure action is invoked.
|
|
||||||
//
|
|
||||||
// If the failure action is CONTINUE, there is no effect.
|
|
||||||
// If the failure action is PAUSE, no more tasks will be updated until
|
|
||||||
// another update is started.
|
|
||||||
MaxFailureRatio float32
|
|
||||||
}
|
|
197
vendor/github.com/docker/docker/api/types/swarm/swarm.go
generated
vendored
197
vendor/github.com/docker/docker/api/types/swarm/swarm.go
generated
vendored
|
@ -1,197 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// ClusterInfo represents info about the cluster for outputing in "info"
|
|
||||||
// it contains the same information as "Swarm", but without the JoinTokens
|
|
||||||
type ClusterInfo struct {
|
|
||||||
ID string
|
|
||||||
Meta
|
|
||||||
Spec Spec
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swarm represents a swarm.
|
|
||||||
type Swarm struct {
|
|
||||||
ClusterInfo
|
|
||||||
JoinTokens JoinTokens
|
|
||||||
}
|
|
||||||
|
|
||||||
// JoinTokens contains the tokens workers and managers need to join the swarm.
|
|
||||||
type JoinTokens struct {
|
|
||||||
// Worker is the join token workers may use to join the swarm.
|
|
||||||
Worker string
|
|
||||||
// Manager is the join token managers may use to join the swarm.
|
|
||||||
Manager string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Spec represents the spec of a swarm.
|
|
||||||
type Spec struct {
|
|
||||||
Annotations
|
|
||||||
|
|
||||||
Orchestration OrchestrationConfig `json:",omitempty"`
|
|
||||||
Raft RaftConfig `json:",omitempty"`
|
|
||||||
Dispatcher DispatcherConfig `json:",omitempty"`
|
|
||||||
CAConfig CAConfig `json:",omitempty"`
|
|
||||||
TaskDefaults TaskDefaults `json:",omitempty"`
|
|
||||||
EncryptionConfig EncryptionConfig `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrchestrationConfig represents orchestration configuration.
|
|
||||||
type OrchestrationConfig struct {
|
|
||||||
// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
|
|
||||||
// node. If negative, never remove completed or failed tasks.
|
|
||||||
TaskHistoryRetentionLimit *int64 `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TaskDefaults parameterizes cluster-level task creation with default values.
|
|
||||||
type TaskDefaults struct {
|
|
||||||
// LogDriver selects the log driver to use for tasks created in the
|
|
||||||
// orchestrator if unspecified by a service.
|
|
||||||
//
|
|
||||||
// Updating this value will only have an affect on new tasks. Old tasks
|
|
||||||
// will continue use their previously configured log driver until
|
|
||||||
// recreated.
|
|
||||||
LogDriver *Driver `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncryptionConfig controls at-rest encryption of data and keys.
|
|
||||||
type EncryptionConfig struct {
|
|
||||||
// AutoLockManagers specifies whether or not managers TLS keys and raft data
|
|
||||||
// should be encrypted at rest in such a way that they must be unlocked
|
|
||||||
// before the manager node starts up again.
|
|
||||||
AutoLockManagers bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// RaftConfig represents raft configuration.
|
|
||||||
type RaftConfig struct {
|
|
||||||
// SnapshotInterval is the number of log entries between snapshots.
|
|
||||||
SnapshotInterval uint64 `json:",omitempty"`
|
|
||||||
|
|
||||||
// KeepOldSnapshots is the number of snapshots to keep beyond the
|
|
||||||
// current snapshot.
|
|
||||||
KeepOldSnapshots *uint64 `json:",omitempty"`
|
|
||||||
|
|
||||||
// LogEntriesForSlowFollowers is the number of log entries to keep
|
|
||||||
// around to sync up slow followers after a snapshot is created.
|
|
||||||
LogEntriesForSlowFollowers uint64 `json:",omitempty"`
|
|
||||||
|
|
||||||
// ElectionTick is the number of ticks that a follower will wait for a message
|
|
||||||
// from the leader before becoming a candidate and starting an election.
|
|
||||||
// ElectionTick must be greater than HeartbeatTick.
|
|
||||||
//
|
|
||||||
// A tick currently defaults to one second, so these translate directly to
|
|
||||||
// seconds currently, but this is NOT guaranteed.
|
|
||||||
ElectionTick int
|
|
||||||
|
|
||||||
// HeartbeatTick is the number of ticks between heartbeats. Every
|
|
||||||
// HeartbeatTick ticks, the leader will send a heartbeat to the
|
|
||||||
// followers.
|
|
||||||
//
|
|
||||||
// A tick currently defaults to one second, so these translate directly to
|
|
||||||
// seconds currently, but this is NOT guaranteed.
|
|
||||||
HeartbeatTick int
|
|
||||||
}
|
|
||||||
|
|
||||||
// DispatcherConfig represents dispatcher configuration.
|
|
||||||
type DispatcherConfig struct {
|
|
||||||
// HeartbeatPeriod defines how often agent should send heartbeats to
|
|
||||||
// dispatcher.
|
|
||||||
HeartbeatPeriod time.Duration `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CAConfig represents CA configuration.
|
|
||||||
type CAConfig struct {
|
|
||||||
// NodeCertExpiry is the duration certificates should be issued for
|
|
||||||
NodeCertExpiry time.Duration `json:",omitempty"`
|
|
||||||
|
|
||||||
// ExternalCAs is a list of CAs to which a manager node will make
|
|
||||||
// certificate signing requests for node certificates.
|
|
||||||
ExternalCAs []*ExternalCA `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExternalCAProtocol represents type of external CA.
|
|
||||||
type ExternalCAProtocol string
|
|
||||||
|
|
||||||
// ExternalCAProtocolCFSSL CFSSL
|
|
||||||
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
|
|
||||||
|
|
||||||
// ExternalCA defines external CA to be used by the cluster.
|
|
||||||
type ExternalCA struct {
|
|
||||||
// Protocol is the protocol used by this external CA.
|
|
||||||
Protocol ExternalCAProtocol
|
|
||||||
|
|
||||||
// URL is the URL where the external CA can be reached.
|
|
||||||
URL string
|
|
||||||
|
|
||||||
// Options is a set of additional key/value pairs whose interpretation
|
|
||||||
// depends on the specified CA type.
|
|
||||||
Options map[string]string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitRequest is the request used to init a swarm.
|
|
||||||
type InitRequest struct {
|
|
||||||
ListenAddr string
|
|
||||||
AdvertiseAddr string
|
|
||||||
ForceNewCluster bool
|
|
||||||
Spec Spec
|
|
||||||
AutoLockManagers bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// JoinRequest is the request used to join a swarm.
|
|
||||||
type JoinRequest struct {
|
|
||||||
ListenAddr string
|
|
||||||
AdvertiseAddr string
|
|
||||||
RemoteAddrs []string
|
|
||||||
JoinToken string // accept by secret
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnlockRequest is the request used to unlock a swarm.
|
|
||||||
type UnlockRequest struct {
|
|
||||||
// UnlockKey is the unlock key in ASCII-armored format.
|
|
||||||
UnlockKey string
|
|
||||||
}
|
|
||||||
|
|
||||||
// LocalNodeState represents the state of the local node.
|
|
||||||
type LocalNodeState string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// LocalNodeStateInactive INACTIVE
|
|
||||||
LocalNodeStateInactive LocalNodeState = "inactive"
|
|
||||||
// LocalNodeStatePending PENDING
|
|
||||||
LocalNodeStatePending LocalNodeState = "pending"
|
|
||||||
// LocalNodeStateActive ACTIVE
|
|
||||||
LocalNodeStateActive LocalNodeState = "active"
|
|
||||||
// LocalNodeStateError ERROR
|
|
||||||
LocalNodeStateError LocalNodeState = "error"
|
|
||||||
// LocalNodeStateLocked LOCKED
|
|
||||||
LocalNodeStateLocked LocalNodeState = "locked"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Info represents generic information about swarm.
|
|
||||||
type Info struct {
|
|
||||||
NodeID string
|
|
||||||
NodeAddr string
|
|
||||||
|
|
||||||
LocalNodeState LocalNodeState
|
|
||||||
ControlAvailable bool
|
|
||||||
Error string
|
|
||||||
|
|
||||||
RemoteManagers []Peer
|
|
||||||
Nodes int
|
|
||||||
Managers int
|
|
||||||
|
|
||||||
Cluster ClusterInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peer represents a peer.
|
|
||||||
type Peer struct {
|
|
||||||
NodeID string
|
|
||||||
Addr string
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateFlags contains flags for SwarmUpdate.
|
|
||||||
type UpdateFlags struct {
|
|
||||||
RotateWorkerToken bool
|
|
||||||
RotateManagerToken bool
|
|
||||||
RotateManagerUnlockKey bool
|
|
||||||
}
|
|
128
vendor/github.com/docker/docker/api/types/swarm/task.go
generated
vendored
128
vendor/github.com/docker/docker/api/types/swarm/task.go
generated
vendored
|
@ -1,128 +0,0 @@
|
||||||
package swarm
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// TaskState represents the state of a task.
|
|
||||||
type TaskState string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// TaskStateNew NEW
|
|
||||||
TaskStateNew TaskState = "new"
|
|
||||||
// TaskStateAllocated ALLOCATED
|
|
||||||
TaskStateAllocated TaskState = "allocated"
|
|
||||||
// TaskStatePending PENDING
|
|
||||||
TaskStatePending TaskState = "pending"
|
|
||||||
// TaskStateAssigned ASSIGNED
|
|
||||||
TaskStateAssigned TaskState = "assigned"
|
|
||||||
// TaskStateAccepted ACCEPTED
|
|
||||||
TaskStateAccepted TaskState = "accepted"
|
|
||||||
// TaskStatePreparing PREPARING
|
|
||||||
TaskStatePreparing TaskState = "preparing"
|
|
||||||
// TaskStateReady READY
|
|
||||||
TaskStateReady TaskState = "ready"
|
|
||||||
// TaskStateStarting STARTING
|
|
||||||
TaskStateStarting TaskState = "starting"
|
|
||||||
// TaskStateRunning RUNNING
|
|
||||||
TaskStateRunning TaskState = "running"
|
|
||||||
// TaskStateComplete COMPLETE
|
|
||||||
TaskStateComplete TaskState = "complete"
|
|
||||||
// TaskStateShutdown SHUTDOWN
|
|
||||||
TaskStateShutdown TaskState = "shutdown"
|
|
||||||
// TaskStateFailed FAILED
|
|
||||||
TaskStateFailed TaskState = "failed"
|
|
||||||
// TaskStateRejected REJECTED
|
|
||||||
TaskStateRejected TaskState = "rejected"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Task represents a task.
|
|
||||||
type Task struct {
|
|
||||||
ID string
|
|
||||||
Meta
|
|
||||||
Annotations
|
|
||||||
|
|
||||||
Spec TaskSpec `json:",omitempty"`
|
|
||||||
ServiceID string `json:",omitempty"`
|
|
||||||
Slot int `json:",omitempty"`
|
|
||||||
NodeID string `json:",omitempty"`
|
|
||||||
Status TaskStatus `json:",omitempty"`
|
|
||||||
DesiredState TaskState `json:",omitempty"`
|
|
||||||
NetworksAttachments []NetworkAttachment `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TaskSpec represents the spec of a task.
|
|
||||||
type TaskSpec struct {
|
|
||||||
ContainerSpec ContainerSpec `json:",omitempty"`
|
|
||||||
Resources *ResourceRequirements `json:",omitempty"`
|
|
||||||
RestartPolicy *RestartPolicy `json:",omitempty"`
|
|
||||||
Placement *Placement `json:",omitempty"`
|
|
||||||
Networks []NetworkAttachmentConfig `json:",omitempty"`
|
|
||||||
|
|
||||||
// LogDriver specifies the LogDriver to use for tasks created from this
|
|
||||||
// spec. If not present, the one on cluster default on swarm.Spec will be
|
|
||||||
// used, finally falling back to the engine default if not specified.
|
|
||||||
LogDriver *Driver `json:",omitempty"`
|
|
||||||
|
|
||||||
// ForceUpdate is a counter that triggers an update even if no relevant
|
|
||||||
// parameters have been changed.
|
|
||||||
ForceUpdate uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resources represents resources (CPU/Memory).
|
|
||||||
type Resources struct {
|
|
||||||
NanoCPUs int64 `json:",omitempty"`
|
|
||||||
MemoryBytes int64 `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResourceRequirements represents resources requirements.
|
|
||||||
type ResourceRequirements struct {
|
|
||||||
Limits *Resources `json:",omitempty"`
|
|
||||||
Reservations *Resources `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Placement represents orchestration parameters.
|
|
||||||
type Placement struct {
|
|
||||||
Constraints []string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// RestartPolicy represents the restart policy.
|
|
||||||
type RestartPolicy struct {
|
|
||||||
Condition RestartPolicyCondition `json:",omitempty"`
|
|
||||||
Delay *time.Duration `json:",omitempty"`
|
|
||||||
MaxAttempts *uint64 `json:",omitempty"`
|
|
||||||
Window *time.Duration `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// RestartPolicyCondition represents when to restart.
|
|
||||||
type RestartPolicyCondition string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// RestartPolicyConditionNone NONE
|
|
||||||
RestartPolicyConditionNone RestartPolicyCondition = "none"
|
|
||||||
// RestartPolicyConditionOnFailure ON_FAILURE
|
|
||||||
RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
|
|
||||||
// RestartPolicyConditionAny ANY
|
|
||||||
RestartPolicyConditionAny RestartPolicyCondition = "any"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TaskStatus represents the status of a task.
|
|
||||||
type TaskStatus struct {
|
|
||||||
Timestamp time.Time `json:",omitempty"`
|
|
||||||
State TaskState `json:",omitempty"`
|
|
||||||
Message string `json:",omitempty"`
|
|
||||||
Err string `json:",omitempty"`
|
|
||||||
ContainerStatus ContainerStatus `json:",omitempty"`
|
|
||||||
PortStatus PortStatus `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerStatus represents the status of a container.
|
|
||||||
type ContainerStatus struct {
|
|
||||||
ContainerID string `json:",omitempty"`
|
|
||||||
PID int `json:",omitempty"`
|
|
||||||
ExitCode int `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PortStatus represents the port status of a task's host ports whose
|
|
||||||
// service has published host ports
|
|
||||||
type PortStatus struct {
|
|
||||||
Ports []PortConfig `json:",omitempty"`
|
|
||||||
}
|
|
549
vendor/github.com/docker/docker/api/types/types.go
generated
vendored
549
vendor/github.com/docker/docker/api/types/types.go
generated
vendored
|
@ -1,549 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
|
||||||
"github.com/docker/docker/api/types/mount"
|
|
||||||
"github.com/docker/docker/api/types/network"
|
|
||||||
"github.com/docker/docker/api/types/registry"
|
|
||||||
"github.com/docker/docker/api/types/swarm"
|
|
||||||
"github.com/docker/go-connections/nat"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ContainerChange contains response of Engine API:
|
|
||||||
// GET "/containers/{name:.*}/changes"
|
|
||||||
type ContainerChange struct {
|
|
||||||
Kind int
|
|
||||||
Path string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageHistory contains response of Engine API:
|
|
||||||
// GET "/images/{name:.*}/history"
|
|
||||||
type ImageHistory struct {
|
|
||||||
ID string `json:"Id"`
|
|
||||||
Created int64
|
|
||||||
CreatedBy string
|
|
||||||
Tags []string
|
|
||||||
Size int64
|
|
||||||
Comment string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageDelete contains response of Engine API:
|
|
||||||
// DELETE "/images/{name:.*}"
|
|
||||||
type ImageDelete struct {
|
|
||||||
Untagged string `json:",omitempty"`
|
|
||||||
Deleted string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GraphDriverData returns Image's graph driver config info
|
|
||||||
// when calling inspect command
|
|
||||||
type GraphDriverData struct {
|
|
||||||
Name string
|
|
||||||
Data map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// RootFS returns Image's RootFS description including the layer IDs.
|
|
||||||
type RootFS struct {
|
|
||||||
Type string
|
|
||||||
Layers []string `json:",omitempty"`
|
|
||||||
BaseLayer string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageInspect contains response of Engine API:
|
|
||||||
// GET "/images/{name:.*}/json"
|
|
||||||
type ImageInspect struct {
|
|
||||||
ID string `json:"Id"`
|
|
||||||
RepoTags []string
|
|
||||||
RepoDigests []string
|
|
||||||
Parent string
|
|
||||||
Comment string
|
|
||||||
Created string
|
|
||||||
Container string
|
|
||||||
ContainerConfig *container.Config
|
|
||||||
DockerVersion string
|
|
||||||
Author string
|
|
||||||
Config *container.Config
|
|
||||||
Architecture string
|
|
||||||
Os string
|
|
||||||
OsVersion string `json:",omitempty"`
|
|
||||||
Size int64
|
|
||||||
VirtualSize int64
|
|
||||||
GraphDriver GraphDriverData
|
|
||||||
RootFS RootFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// Container contains response of Engine API:
|
|
||||||
// GET "/containers/json"
|
|
||||||
type Container struct {
|
|
||||||
ID string `json:"Id"`
|
|
||||||
Names []string
|
|
||||||
Image string
|
|
||||||
ImageID string
|
|
||||||
Command string
|
|
||||||
Created int64
|
|
||||||
Ports []Port
|
|
||||||
SizeRw int64 `json:",omitempty"`
|
|
||||||
SizeRootFs int64 `json:",omitempty"`
|
|
||||||
Labels map[string]string
|
|
||||||
State string
|
|
||||||
Status string
|
|
||||||
HostConfig struct {
|
|
||||||
NetworkMode string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
NetworkSettings *SummaryNetworkSettings
|
|
||||||
Mounts []MountPoint
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyConfig contains request body of Engine API:
|
|
||||||
// POST "/containers/"+containerID+"/copy"
|
|
||||||
type CopyConfig struct {
|
|
||||||
Resource string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerPathStat is used to encode the header from
|
|
||||||
// GET "/containers/{name:.*}/archive"
|
|
||||||
// "Name" is the file or directory name.
|
|
||||||
type ContainerPathStat struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Mode os.FileMode `json:"mode"`
|
|
||||||
Mtime time.Time `json:"mtime"`
|
|
||||||
LinkTarget string `json:"linkTarget"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerStats contains response of Engine API:
|
|
||||||
// GET "/stats"
|
|
||||||
type ContainerStats struct {
|
|
||||||
Body io.ReadCloser `json:"body"`
|
|
||||||
OSType string `json:"ostype"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerProcessList contains response of Engine API:
|
|
||||||
// GET "/containers/{name:.*}/top"
|
|
||||||
type ContainerProcessList struct {
|
|
||||||
Processes [][]string
|
|
||||||
Titles []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ping contains response of Engine API:
|
|
||||||
// GET "/_ping"
|
|
||||||
type Ping struct {
|
|
||||||
APIVersion string
|
|
||||||
Experimental bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Version contains response of Engine API:
|
|
||||||
// GET "/version"
|
|
||||||
type Version struct {
|
|
||||||
Version string
|
|
||||||
APIVersion string `json:"ApiVersion"`
|
|
||||||
MinAPIVersion string `json:"MinAPIVersion,omitempty"`
|
|
||||||
GitCommit string
|
|
||||||
GoVersion string
|
|
||||||
Os string
|
|
||||||
Arch string
|
|
||||||
KernelVersion string `json:",omitempty"`
|
|
||||||
Experimental bool `json:",omitempty"`
|
|
||||||
BuildTime string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit records a external tool actual commit id version along the
|
|
||||||
// one expect by dockerd as set at build time
|
|
||||||
type Commit struct {
|
|
||||||
ID string
|
|
||||||
Expected string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Info contains response of Engine API:
|
|
||||||
// GET "/info"
|
|
||||||
type Info struct {
|
|
||||||
ID string
|
|
||||||
Containers int
|
|
||||||
ContainersRunning int
|
|
||||||
ContainersPaused int
|
|
||||||
ContainersStopped int
|
|
||||||
Images int
|
|
||||||
Driver string
|
|
||||||
DriverStatus [][2]string
|
|
||||||
SystemStatus [][2]string
|
|
||||||
Plugins PluginsInfo
|
|
||||||
MemoryLimit bool
|
|
||||||
SwapLimit bool
|
|
||||||
KernelMemory bool
|
|
||||||
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
|
|
||||||
CPUCfsQuota bool `json:"CpuCfsQuota"`
|
|
||||||
CPUShares bool
|
|
||||||
CPUSet bool
|
|
||||||
IPv4Forwarding bool
|
|
||||||
BridgeNfIptables bool
|
|
||||||
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
|
|
||||||
Debug bool
|
|
||||||
NFd int
|
|
||||||
OomKillDisable bool
|
|
||||||
NGoroutines int
|
|
||||||
SystemTime string
|
|
||||||
LoggingDriver string
|
|
||||||
CgroupDriver string
|
|
||||||
NEventsListener int
|
|
||||||
KernelVersion string
|
|
||||||
OperatingSystem string
|
|
||||||
OSType string
|
|
||||||
Architecture string
|
|
||||||
IndexServerAddress string
|
|
||||||
RegistryConfig *registry.ServiceConfig
|
|
||||||
NCPU int
|
|
||||||
MemTotal int64
|
|
||||||
DockerRootDir string
|
|
||||||
HTTPProxy string `json:"HttpProxy"`
|
|
||||||
HTTPSProxy string `json:"HttpsProxy"`
|
|
||||||
NoProxy string
|
|
||||||
Name string
|
|
||||||
Labels []string
|
|
||||||
ExperimentalBuild bool
|
|
||||||
ServerVersion string
|
|
||||||
ClusterStore string
|
|
||||||
ClusterAdvertise string
|
|
||||||
Runtimes map[string]Runtime
|
|
||||||
DefaultRuntime string
|
|
||||||
Swarm swarm.Info
|
|
||||||
// LiveRestoreEnabled determines whether containers should be kept
|
|
||||||
// running when the daemon is shutdown or upon daemon start if
|
|
||||||
// running containers are detected
|
|
||||||
LiveRestoreEnabled bool
|
|
||||||
Isolation container.Isolation
|
|
||||||
InitBinary string
|
|
||||||
ContainerdCommit Commit
|
|
||||||
RuncCommit Commit
|
|
||||||
InitCommit Commit
|
|
||||||
SecurityOptions []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeyValue holds a key/value pair
|
|
||||||
type KeyValue struct {
|
|
||||||
Key, Value string
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecurityOpt contains the name and options of a security option
|
|
||||||
type SecurityOpt struct {
|
|
||||||
Name string
|
|
||||||
Options []KeyValue
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeSecurityOptions decodes a security options string slice to a type safe
|
|
||||||
// SecurityOpt
|
|
||||||
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
|
|
||||||
so := []SecurityOpt{}
|
|
||||||
for _, opt := range opts {
|
|
||||||
// support output from a < 1.13 docker daemon
|
|
||||||
if !strings.Contains(opt, "=") {
|
|
||||||
so = append(so, SecurityOpt{Name: opt})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
secopt := SecurityOpt{}
|
|
||||||
split := strings.Split(opt, ",")
|
|
||||||
for _, s := range split {
|
|
||||||
kv := strings.SplitN(s, "=", 2)
|
|
||||||
if len(kv) != 2 {
|
|
||||||
return nil, fmt.Errorf("invalid security option %q", s)
|
|
||||||
}
|
|
||||||
if kv[0] == "" || kv[1] == "" {
|
|
||||||
return nil, errors.New("invalid empty security option")
|
|
||||||
}
|
|
||||||
if kv[0] == "name" {
|
|
||||||
secopt.Name = kv[1]
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
|
|
||||||
}
|
|
||||||
so = append(so, secopt)
|
|
||||||
}
|
|
||||||
return so, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PluginsInfo is a temp struct holding Plugins name
|
|
||||||
// registered with docker daemon. It is used by Info struct
|
|
||||||
type PluginsInfo struct {
|
|
||||||
// List of Volume plugins registered
|
|
||||||
Volume []string
|
|
||||||
// List of Network plugins registered
|
|
||||||
Network []string
|
|
||||||
// List of Authorization plugins registered
|
|
||||||
Authorization []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecStartCheck is a temp struct used by execStart
|
|
||||||
// Config fields is part of ExecConfig in runconfig package
|
|
||||||
type ExecStartCheck struct {
|
|
||||||
// ExecStart will first check if it's detached
|
|
||||||
Detach bool
|
|
||||||
// Check if there's a tty
|
|
||||||
Tty bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// HealthcheckResult stores information about a single run of a healthcheck probe
|
|
||||||
type HealthcheckResult struct {
|
|
||||||
Start time.Time // Start is the time this check started
|
|
||||||
End time.Time // End is the time this check ended
|
|
||||||
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
|
|
||||||
Output string // Output from last check
|
|
||||||
}
|
|
||||||
|
|
||||||
// Health states
|
|
||||||
const (
|
|
||||||
NoHealthcheck = "none" // Indicates there is no healthcheck
|
|
||||||
Starting = "starting" // Starting indicates that the container is not yet ready
|
|
||||||
Healthy = "healthy" // Healthy indicates that the container is running correctly
|
|
||||||
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
|
|
||||||
)
|
|
||||||
|
|
||||||
// Health stores information about the container's healthcheck results
|
|
||||||
type Health struct {
|
|
||||||
Status string // Status is one of Starting, Healthy or Unhealthy
|
|
||||||
FailingStreak int // FailingStreak is the number of consecutive failures
|
|
||||||
Log []*HealthcheckResult // Log contains the last few results (oldest first)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerState stores container's running state
|
|
||||||
// it's part of ContainerJSONBase and will return by "inspect" command
|
|
||||||
type ContainerState struct {
|
|
||||||
Status string
|
|
||||||
Running bool
|
|
||||||
Paused bool
|
|
||||||
Restarting bool
|
|
||||||
OOMKilled bool
|
|
||||||
Dead bool
|
|
||||||
Pid int
|
|
||||||
ExitCode int
|
|
||||||
Error string
|
|
||||||
StartedAt string
|
|
||||||
FinishedAt string
|
|
||||||
Health *Health `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerNode stores information about the node that a container
|
|
||||||
// is running on. It's only available in Docker Swarm
|
|
||||||
type ContainerNode struct {
|
|
||||||
ID string
|
|
||||||
IPAddress string `json:"IP"`
|
|
||||||
Addr string
|
|
||||||
Name string
|
|
||||||
Cpus int
|
|
||||||
Memory int64
|
|
||||||
Labels map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerJSONBase contains response of Engine API:
|
|
||||||
// GET "/containers/{name:.*}/json"
|
|
||||||
type ContainerJSONBase struct {
|
|
||||||
ID string `json:"Id"`
|
|
||||||
Created string
|
|
||||||
Path string
|
|
||||||
Args []string
|
|
||||||
State *ContainerState
|
|
||||||
Image string
|
|
||||||
ResolvConfPath string
|
|
||||||
HostnamePath string
|
|
||||||
HostsPath string
|
|
||||||
LogPath string
|
|
||||||
Node *ContainerNode `json:",omitempty"`
|
|
||||||
Name string
|
|
||||||
RestartCount int
|
|
||||||
Driver string
|
|
||||||
MountLabel string
|
|
||||||
ProcessLabel string
|
|
||||||
AppArmorProfile string
|
|
||||||
ExecIDs []string
|
|
||||||
HostConfig *container.HostConfig
|
|
||||||
GraphDriver GraphDriverData
|
|
||||||
SizeRw *int64 `json:",omitempty"`
|
|
||||||
SizeRootFs *int64 `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerJSON is newly used struct along with MountPoint
|
|
||||||
type ContainerJSON struct {
|
|
||||||
*ContainerJSONBase
|
|
||||||
Mounts []MountPoint
|
|
||||||
Config *container.Config
|
|
||||||
NetworkSettings *NetworkSettings
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkSettings exposes the network settings in the api
|
|
||||||
type NetworkSettings struct {
|
|
||||||
NetworkSettingsBase
|
|
||||||
DefaultNetworkSettings
|
|
||||||
Networks map[string]*network.EndpointSettings
|
|
||||||
}
|
|
||||||
|
|
||||||
// SummaryNetworkSettings provides a summary of container's networks
|
|
||||||
// in /containers/json
|
|
||||||
type SummaryNetworkSettings struct {
|
|
||||||
Networks map[string]*network.EndpointSettings
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkSettingsBase holds basic information about networks
|
|
||||||
type NetworkSettingsBase struct {
|
|
||||||
Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`)
|
|
||||||
SandboxID string // SandboxID uniquely represents a container's network stack
|
|
||||||
HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
|
|
||||||
LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
|
|
||||||
LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
|
|
||||||
Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
|
|
||||||
SandboxKey string // SandboxKey identifies the sandbox
|
|
||||||
SecondaryIPAddresses []network.Address
|
|
||||||
SecondaryIPv6Addresses []network.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultNetworkSettings holds network information
|
|
||||||
// during the 2 release deprecation period.
|
|
||||||
// It will be removed in Docker 1.11.
|
|
||||||
type DefaultNetworkSettings struct {
|
|
||||||
EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
|
|
||||||
Gateway string // Gateway holds the gateway address for the network
|
|
||||||
GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
|
|
||||||
GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
|
|
||||||
IPAddress string // IPAddress holds the IPv4 address for the network
|
|
||||||
IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
|
|
||||||
IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
|
|
||||||
MacAddress string // MacAddress holds the MAC address for the network
|
|
||||||
}
|
|
||||||
|
|
||||||
// MountPoint represents a mount point configuration inside the container.
|
|
||||||
// This is used for reporting the mountpoints in use by a container.
|
|
||||||
type MountPoint struct {
|
|
||||||
Type mount.Type `json:",omitempty"`
|
|
||||||
Name string `json:",omitempty"`
|
|
||||||
Source string
|
|
||||||
Destination string
|
|
||||||
Driver string `json:",omitempty"`
|
|
||||||
Mode string
|
|
||||||
RW bool
|
|
||||||
Propagation mount.Propagation
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkResource is the body of the "get network" http response message
|
|
||||||
type NetworkResource struct {
|
|
||||||
Name string // Name is the requested name of the network
|
|
||||||
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
|
|
||||||
Created time.Time // Created is the time the network created
|
|
||||||
Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
|
|
||||||
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
|
|
||||||
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
|
|
||||||
IPAM network.IPAM // IPAM is the network's IP Address Management
|
|
||||||
Internal bool // Internal represents if the network is used internal only
|
|
||||||
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
|
|
||||||
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
|
|
||||||
Options map[string]string // Options holds the network specific options to use for when creating the network
|
|
||||||
Labels map[string]string // Labels holds metadata specific to the network being created
|
|
||||||
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
|
|
||||||
}
|
|
||||||
|
|
||||||
// EndpointResource contains network resources allocated and used for a container in a network
|
|
||||||
type EndpointResource struct {
|
|
||||||
Name string
|
|
||||||
EndpointID string
|
|
||||||
MacAddress string
|
|
||||||
IPv4Address string
|
|
||||||
IPv6Address string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkCreate is the expected body of the "create network" http request message
|
|
||||||
type NetworkCreate struct {
|
|
||||||
CheckDuplicate bool
|
|
||||||
Driver string
|
|
||||||
EnableIPv6 bool
|
|
||||||
IPAM *network.IPAM
|
|
||||||
Internal bool
|
|
||||||
Attachable bool
|
|
||||||
Options map[string]string
|
|
||||||
Labels map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
|
||||||
type NetworkCreateRequest struct {
|
|
||||||
NetworkCreate
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkCreateResponse is the response message sent by the server for network create call
|
|
||||||
type NetworkCreateResponse struct {
|
|
||||||
ID string `json:"Id"`
|
|
||||||
Warning string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkConnect represents the data to be used to connect a container to the network
|
|
||||||
type NetworkConnect struct {
|
|
||||||
Container string
|
|
||||||
EndpointConfig *network.EndpointSettings `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
|
||||||
type NetworkDisconnect struct {
|
|
||||||
Container string
|
|
||||||
Force bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checkpoint represents the details of a checkpoint
|
|
||||||
type Checkpoint struct {
|
|
||||||
Name string // Name is the name of the checkpoint
|
|
||||||
}
|
|
||||||
|
|
||||||
// Runtime describes an OCI runtime
|
|
||||||
type Runtime struct {
|
|
||||||
Path string `json:"path"`
|
|
||||||
Args []string `json:"runtimeArgs,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// DiskUsage contains response of Engine API:
|
|
||||||
// GET "/system/df"
|
|
||||||
type DiskUsage struct {
|
|
||||||
LayersSize int64
|
|
||||||
Images []*ImageSummary
|
|
||||||
Containers []*Container
|
|
||||||
Volumes []*Volume
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainersPruneReport contains the response for Engine API:
|
|
||||||
// POST "/containers/prune"
|
|
||||||
type ContainersPruneReport struct {
|
|
||||||
ContainersDeleted []string
|
|
||||||
SpaceReclaimed uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// VolumesPruneReport contains the response for Engine API:
|
|
||||||
// POST "/volumes/prune"
|
|
||||||
type VolumesPruneReport struct {
|
|
||||||
VolumesDeleted []string
|
|
||||||
SpaceReclaimed uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImagesPruneReport contains the response for Engine API:
|
|
||||||
// POST "/images/prune"
|
|
||||||
type ImagesPruneReport struct {
|
|
||||||
ImagesDeleted []ImageDelete
|
|
||||||
SpaceReclaimed uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworksPruneReport contains the response for Engine API:
|
|
||||||
// POST "/networks/prune"
|
|
||||||
type NetworksPruneReport struct {
|
|
||||||
NetworksDeleted []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecretCreateResponse contains the information returned to a client
|
|
||||||
// on the creation of a new secret.
|
|
||||||
type SecretCreateResponse struct {
|
|
||||||
// ID is the id of the created secret.
|
|
||||||
ID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecretListOptions holds parameters to list secrets
|
|
||||||
type SecretListOptions struct {
|
|
||||||
Filters filters.Args
|
|
||||||
}
|
|
62
vendor/github.com/docker/docker/api/types/versions/compare.go
generated
vendored
62
vendor/github.com/docker/docker/api/types/versions/compare.go
generated
vendored
|
@ -1,62 +0,0 @@
|
||||||
package versions
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// compare compares two version strings
|
|
||||||
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
|
|
||||||
func compare(v1, v2 string) int {
|
|
||||||
var (
|
|
||||||
currTab = strings.Split(v1, ".")
|
|
||||||
otherTab = strings.Split(v2, ".")
|
|
||||||
)
|
|
||||||
|
|
||||||
max := len(currTab)
|
|
||||||
if len(otherTab) > max {
|
|
||||||
max = len(otherTab)
|
|
||||||
}
|
|
||||||
for i := 0; i < max; i++ {
|
|
||||||
var currInt, otherInt int
|
|
||||||
|
|
||||||
if len(currTab) > i {
|
|
||||||
currInt, _ = strconv.Atoi(currTab[i])
|
|
||||||
}
|
|
||||||
if len(otherTab) > i {
|
|
||||||
otherInt, _ = strconv.Atoi(otherTab[i])
|
|
||||||
}
|
|
||||||
if currInt > otherInt {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
if otherInt > currInt {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// LessThan checks if a version is less than another
|
|
||||||
func LessThan(v, other string) bool {
|
|
||||||
return compare(v, other) == -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// LessThanOrEqualTo checks if a version is less than or equal to another
|
|
||||||
func LessThanOrEqualTo(v, other string) bool {
|
|
||||||
return compare(v, other) <= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// GreaterThan checks if a version is greater than another
|
|
||||||
func GreaterThan(v, other string) bool {
|
|
||||||
return compare(v, other) == 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// GreaterThanOrEqualTo checks if a version is greater than or equal to another
|
|
||||||
func GreaterThanOrEqualTo(v, other string) bool {
|
|
||||||
return compare(v, other) >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal checks if a version is equal to another
|
|
||||||
func Equal(v, other string) bool {
|
|
||||||
return compare(v, other) == 0
|
|
||||||
}
|
|
58
vendor/github.com/docker/docker/api/types/volume.go
generated
vendored
58
vendor/github.com/docker/docker/api/types/volume.go
generated
vendored
|
@ -1,58 +0,0 @@
|
||||||
package types
|
|
||||||
|
|
||||||
// This file was generated by the swagger tool.
|
|
||||||
// Editing this file might prove futile when you re-run the swagger generate command
|
|
||||||
|
|
||||||
// Volume volume
|
|
||||||
// swagger:model Volume
|
|
||||||
type Volume struct {
|
|
||||||
|
|
||||||
// Name of the volume driver used by the volume.
|
|
||||||
// Required: true
|
|
||||||
Driver string `json:"Driver"`
|
|
||||||
|
|
||||||
// User-defined key/value metadata.
|
|
||||||
// Required: true
|
|
||||||
Labels map[string]string `json:"Labels"`
|
|
||||||
|
|
||||||
// Mount path of the volume on the host.
|
|
||||||
// Required: true
|
|
||||||
Mountpoint string `json:"Mountpoint"`
|
|
||||||
|
|
||||||
// Name of the volume.
|
|
||||||
// Required: true
|
|
||||||
Name string `json:"Name"`
|
|
||||||
|
|
||||||
// The driver specific options used when creating the volume.
|
|
||||||
// Required: true
|
|
||||||
Options map[string]string `json:"Options"`
|
|
||||||
|
|
||||||
// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
|
|
||||||
// Required: true
|
|
||||||
Scope string `json:"Scope"`
|
|
||||||
|
|
||||||
// Low-level details about the volume, provided by the volume driver.
|
|
||||||
// Details are returned as a map with key/value pairs:
|
|
||||||
// `{"key":"value","key2":"value2"}`.
|
|
||||||
//
|
|
||||||
// The `Status` field is optional, and is omitted if the volume driver
|
|
||||||
// does not support this feature.
|
|
||||||
//
|
|
||||||
Status map[string]interface{} `json:"Status,omitempty"`
|
|
||||||
|
|
||||||
// usage data
|
|
||||||
UsageData *VolumeUsageData `json:"UsageData,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// VolumeUsageData volume usage data
|
|
||||||
// swagger:model VolumeUsageData
|
|
||||||
type VolumeUsageData struct {
|
|
||||||
|
|
||||||
// The number of containers referencing this volume.
|
|
||||||
// Required: true
|
|
||||||
RefCount int64 `json:"RefCount"`
|
|
||||||
|
|
||||||
// The disk space used by the volume (local driver only)
|
|
||||||
// Required: true
|
|
||||||
Size int64 `json:"Size"`
|
|
||||||
}
|
|
169
vendor/github.com/docker/docker/builder/builder.go
generated
vendored
169
vendor/github.com/docker/docker/builder/builder.go
generated
vendored
|
@ -1,169 +0,0 @@
|
||||||
// Package builder defines interfaces for any Docker builder to implement.
|
|
||||||
//
|
|
||||||
// Historically, only server-side Dockerfile interpreters existed.
|
|
||||||
// This package allows for other implementations of Docker builders.
|
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/backend"
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/docker/docker/image"
|
|
||||||
"github.com/docker/docker/reference"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
|
|
||||||
DefaultDockerfileName string = "Dockerfile"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Context represents a file system tree.
|
|
||||||
type Context interface {
|
|
||||||
// Close allows to signal that the filesystem tree won't be used anymore.
|
|
||||||
// For Context implementations using a temporary directory, it is recommended to
|
|
||||||
// delete the temporary directory in Close().
|
|
||||||
Close() error
|
|
||||||
// Stat returns an entry corresponding to path if any.
|
|
||||||
// It is recommended to return an error if path was not found.
|
|
||||||
// If path is a symlink it also returns the path to the target file.
|
|
||||||
Stat(path string) (string, FileInfo, error)
|
|
||||||
// Open opens path from the context and returns a readable stream of it.
|
|
||||||
Open(path string) (io.ReadCloser, error)
|
|
||||||
// Walk walks the tree of the context with the function passed to it.
|
|
||||||
Walk(root string, walkFn WalkFunc) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// WalkFunc is the type of the function called for each file or directory visited by Context.Walk().
|
|
||||||
type WalkFunc func(path string, fi FileInfo, err error) error
|
|
||||||
|
|
||||||
// ModifiableContext represents a modifiable Context.
|
|
||||||
// TODO: remove this interface once we can get rid of Remove()
|
|
||||||
type ModifiableContext interface {
|
|
||||||
Context
|
|
||||||
// Remove deletes the entry specified by `path`.
|
|
||||||
// It is usual for directory entries to delete all its subentries.
|
|
||||||
Remove(path string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file.
|
|
||||||
// TODO: remove this interface once pkg/archive exposes a walk function that Context can use.
|
|
||||||
type FileInfo interface {
|
|
||||||
os.FileInfo
|
|
||||||
Path() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// PathFileInfo is a convenience struct that implements the FileInfo interface.
|
|
||||||
type PathFileInfo struct {
|
|
||||||
os.FileInfo
|
|
||||||
// FilePath holds the absolute path to the file.
|
|
||||||
FilePath string
|
|
||||||
// Name holds the basename for the file.
|
|
||||||
FileName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Path returns the absolute path to the file.
|
|
||||||
func (fi PathFileInfo) Path() string {
|
|
||||||
return fi.FilePath
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the basename of the file.
|
|
||||||
func (fi PathFileInfo) Name() string {
|
|
||||||
if fi.FileName != "" {
|
|
||||||
return fi.FileName
|
|
||||||
}
|
|
||||||
return fi.FileInfo.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashed defines an extra method intended for implementations of os.FileInfo.
|
|
||||||
type Hashed interface {
|
|
||||||
// Hash returns the hash of a file.
|
|
||||||
Hash() string
|
|
||||||
SetHash(string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HashedFileInfo is a convenient struct that augments FileInfo with a field.
|
|
||||||
type HashedFileInfo struct {
|
|
||||||
FileInfo
|
|
||||||
// FileHash represents the hash of a file.
|
|
||||||
FileHash string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash returns the hash of a file.
|
|
||||||
func (fi HashedFileInfo) Hash() string {
|
|
||||||
return fi.FileHash
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHash sets the hash of a file.
|
|
||||||
func (fi *HashedFileInfo) SetHash(h string) {
|
|
||||||
fi.FileHash = h
|
|
||||||
}
|
|
||||||
|
|
||||||
// Backend abstracts calls to a Docker Daemon.
|
|
||||||
type Backend interface {
|
|
||||||
// TODO: use digest reference instead of name
|
|
||||||
|
|
||||||
// GetImageOnBuild looks up a Docker image referenced by `name`.
|
|
||||||
GetImageOnBuild(name string) (Image, error)
|
|
||||||
// TagImage tags an image with newTag
|
|
||||||
TagImageWithReference(image.ID, reference.Named) error
|
|
||||||
// PullOnBuild tells Docker to pull image referenced by `name`.
|
|
||||||
PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error)
|
|
||||||
// ContainerAttachRaw attaches to container.
|
|
||||||
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
|
|
||||||
// ContainerCreate creates a new Docker container and returns potential warnings
|
|
||||||
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
|
|
||||||
// ContainerRm removes a container specified by `id`.
|
|
||||||
ContainerRm(name string, config *types.ContainerRmConfig) error
|
|
||||||
// Commit creates a new Docker image from an existing Docker container.
|
|
||||||
Commit(string, *backend.ContainerCommitConfig) (string, error)
|
|
||||||
// ContainerKill stops the container execution abruptly.
|
|
||||||
ContainerKill(containerID string, sig uint64) error
|
|
||||||
// ContainerStart starts a new container
|
|
||||||
ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
|
|
||||||
// ContainerWait stops processing until the given container is stopped.
|
|
||||||
ContainerWait(containerID string, timeout time.Duration) (int, error)
|
|
||||||
// ContainerUpdateCmdOnBuild updates container.Path and container.Args
|
|
||||||
ContainerUpdateCmdOnBuild(containerID string, cmd []string) error
|
|
||||||
// ContainerCreateWorkdir creates the workdir (currently only used on Windows)
|
|
||||||
ContainerCreateWorkdir(containerID string) error
|
|
||||||
|
|
||||||
// ContainerCopy copies/extracts a source FileInfo to a destination path inside a container
|
|
||||||
// specified by a container object.
|
|
||||||
// TODO: make an Extract method instead of passing `decompress`
|
|
||||||
// TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used
|
|
||||||
// with Context.Walk
|
|
||||||
// ContainerCopy(name string, res string) (io.ReadCloser, error)
|
|
||||||
// TODO: use copyBackend api
|
|
||||||
CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error
|
|
||||||
|
|
||||||
// HasExperimental checks if the backend supports experimental features
|
|
||||||
HasExperimental() bool
|
|
||||||
|
|
||||||
// SquashImage squashes the fs layers from the provided image down to the specified `to` image
|
|
||||||
SquashImage(from string, to string) (string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Image represents a Docker image used by the builder.
|
|
||||||
type Image interface {
|
|
||||||
ImageID() string
|
|
||||||
RunConfig() *container.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageCacheBuilder represents a generator for stateful image cache.
|
|
||||||
type ImageCacheBuilder interface {
|
|
||||||
// MakeImageCache creates a stateful image cache.
|
|
||||||
MakeImageCache(cacheFrom []string) ImageCache
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageCache abstracts an image cache.
|
|
||||||
// (parent image, child runconfig) -> child image
|
|
||||||
type ImageCache interface {
|
|
||||||
// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
|
|
||||||
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
|
|
||||||
GetCache(parentID string, cfg *container.Config) (imageID string, err error)
|
|
||||||
}
|
|
260
vendor/github.com/docker/docker/builder/context.go
generated
vendored
260
vendor/github.com/docker/docker/builder/context.go
generated
vendored
|
@ -1,260 +0,0 @@
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/fileutils"
|
|
||||||
"github.com/docker/docker/pkg/gitutils"
|
|
||||||
"github.com/docker/docker/pkg/httputils"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
|
||||||
"github.com/docker/docker/pkg/progress"
|
|
||||||
"github.com/docker/docker/pkg/streamformatter"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ValidateContextDirectory checks if all the contents of the directory
|
|
||||||
// can be read and returns an error if some files can't be read
|
|
||||||
// symlinks which point to non-existing files don't trigger an error
|
|
||||||
func ValidateContextDirectory(srcPath string, excludes []string) error {
|
|
||||||
contextRoot, err := getContextRoot(srcPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
if os.IsPermission(err) {
|
|
||||||
return fmt.Errorf("can't stat '%s'", filePath)
|
|
||||||
}
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// skip this directory/file if it's not in the path, it won't get added to the context
|
|
||||||
if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil {
|
|
||||||
return err
|
|
||||||
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
|
|
||||||
return err
|
|
||||||
} else if skip {
|
|
||||||
if f.IsDir() {
|
|
||||||
return filepath.SkipDir
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// skip checking if symlinks point to non-existing files, such symlinks can be useful
|
|
||||||
// also skip named pipes, because they hanging on open
|
|
||||||
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if !f.IsDir() {
|
|
||||||
currentFile, err := os.Open(filePath)
|
|
||||||
if err != nil && os.IsPermission(err) {
|
|
||||||
return fmt.Errorf("no permission to read from '%s'", filePath)
|
|
||||||
}
|
|
||||||
currentFile.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContextFromReader will read the contents of the given reader as either a
|
|
||||||
// Dockerfile or tar archive. Returns a tar archive used as a context and a
|
|
||||||
// path to the Dockerfile inside the tar.
|
|
||||||
func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
|
|
||||||
buf := bufio.NewReader(r)
|
|
||||||
|
|
||||||
magic, err := buf.Peek(archive.HeaderSize)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if archive.IsArchive(magic) {
|
|
||||||
return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input should be read as a Dockerfile.
|
|
||||||
tmpDir, err := ioutil.TempDir("", "docker-build-context-")
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName))
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
_, err = io.Copy(f, buf)
|
|
||||||
if err != nil {
|
|
||||||
f.Close()
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := f.Close(); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
if err := r.Close(); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
tar, err := archive.Tar(tmpDir, archive.Uncompressed)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ioutils.NewReadCloserWrapper(tar, func() error {
|
|
||||||
err := tar.Close()
|
|
||||||
os.RemoveAll(tmpDir)
|
|
||||||
return err
|
|
||||||
}), DefaultDockerfileName, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContextFromGitURL uses a Git URL as context for a `docker build`. The
|
|
||||||
// git repo is cloned into a temporary directory used as the context directory.
|
|
||||||
// Returns the absolute path to the temporary context directory, the relative
|
|
||||||
// path of the dockerfile in that context directory, and a non-nil error on
|
|
||||||
// success.
|
|
||||||
func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
|
||||||
if _, err := exec.LookPath("git"); err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to find 'git': %v", err)
|
|
||||||
}
|
|
||||||
if absContextDir, err = gitutils.Clone(gitURL); err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return getDockerfileRelPath(absContextDir, dockerfileName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContextFromURL uses a remote URL as context for a `docker build`. The
|
|
||||||
// remote resource is downloaded as either a Dockerfile or a tar archive.
|
|
||||||
// Returns the tar archive used for the context and a path of the
|
|
||||||
// dockerfile inside the tar.
|
|
||||||
func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
|
|
||||||
response, err := httputils.Download(remoteURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
|
|
||||||
}
|
|
||||||
progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true)
|
|
||||||
|
|
||||||
// Pass the response body through a progress reader.
|
|
||||||
progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))
|
|
||||||
|
|
||||||
return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContextFromLocalDir uses the given local directory as context for a
|
|
||||||
// `docker build`. Returns the absolute path to the local context directory,
|
|
||||||
// the relative path of the dockerfile in that context directory, and a non-nil
|
|
||||||
// error on success.
|
|
||||||
func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
|
||||||
// When using a local context directory, when the Dockerfile is specified
|
|
||||||
// with the `-f/--file` option then it is considered relative to the
|
|
||||||
// current directory and not the context directory.
|
|
||||||
if dockerfileName != "" {
|
|
||||||
if dockerfileName, err = filepath.Abs(dockerfileName); err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return getDockerfileRelPath(localDir, dockerfileName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDockerfileRelPath uses the given context directory for a `docker build`
|
|
||||||
// and returns the absolute path to the context directory, the relative path of
|
|
||||||
// the dockerfile in that context directory, and a non-nil error on success.
|
|
||||||
func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) {
|
|
||||||
if absContextDir, err = filepath.Abs(givenContextDir); err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The context dir might be a symbolic link, so follow it to the actual
|
|
||||||
// target directory.
|
|
||||||
//
|
|
||||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
|
||||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
|
||||||
// paths (those starting with \\). This hack means that when using links
|
|
||||||
// on UNC paths, they will not be followed.
|
|
||||||
if !isUNC(absContextDir) {
|
|
||||||
absContextDir, err = filepath.EvalSymlinks(absContextDir)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
stat, err := os.Lstat(absContextDir)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !stat.IsDir() {
|
|
||||||
return "", "", fmt.Errorf("context must be a directory: %s", absContextDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
absDockerfile := givenDockerfile
|
|
||||||
if absDockerfile == "" {
|
|
||||||
// No -f/--file was specified so use the default relative to the
|
|
||||||
// context directory.
|
|
||||||
absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName)
|
|
||||||
|
|
||||||
// Just to be nice ;-) look for 'dockerfile' too but only
|
|
||||||
// use it if we found it, otherwise ignore this check
|
|
||||||
if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
|
|
||||||
altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName))
|
|
||||||
if _, err = os.Lstat(altPath); err == nil {
|
|
||||||
absDockerfile = altPath
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If not already an absolute path, the Dockerfile path should be joined to
|
|
||||||
// the base directory.
|
|
||||||
if !filepath.IsAbs(absDockerfile) {
|
|
||||||
absDockerfile = filepath.Join(absContextDir, absDockerfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Evaluate symlinks in the path to the Dockerfile too.
|
|
||||||
//
|
|
||||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
|
||||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
|
||||||
// paths (those starting with \\). This hack means that when using links
|
|
||||||
// on UNC paths, they will not be followed.
|
|
||||||
if !isUNC(absDockerfile) {
|
|
||||||
absDockerfile, err = filepath.EvalSymlinks(absDockerfile)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := os.Lstat(absDockerfile); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile)
|
|
||||||
}
|
|
||||||
return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil {
|
|
||||||
return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
|
|
||||||
return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
return absContextDir, relDockerfile, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// isUNC returns true if the path is UNC (one starting \\). It always returns
|
|
||||||
// false on Linux.
|
|
||||||
func isUNC(path string) bool {
|
|
||||||
return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`)
|
|
||||||
}
|
|
11
vendor/github.com/docker/docker/builder/context_unix.go
generated
vendored
11
vendor/github.com/docker/docker/builder/context_unix.go
generated
vendored
|
@ -1,11 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getContextRoot(srcPath string) (string, error) {
|
|
||||||
return filepath.Join(srcPath, "."), nil
|
|
||||||
}
|
|
17
vendor/github.com/docker/docker/builder/context_windows.go
generated
vendored
17
vendor/github.com/docker/docker/builder/context_windows.go
generated
vendored
|
@ -1,17 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/longpath"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getContextRoot(srcPath string) (string, error) {
|
|
||||||
cr, err := filepath.Abs(srcPath)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return longpath.AddPrefix(cr), nil
|
|
||||||
}
|
|
48
vendor/github.com/docker/docker/builder/dockerignore.go
generated
vendored
48
vendor/github.com/docker/docker/builder/dockerignore.go
generated
vendored
|
@ -1,48 +0,0 @@
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/docker/builder/dockerignore"
|
|
||||||
"github.com/docker/docker/pkg/fileutils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DockerIgnoreContext wraps a ModifiableContext to add a method
|
|
||||||
// for handling the .dockerignore file at the root of the context.
|
|
||||||
type DockerIgnoreContext struct {
|
|
||||||
ModifiableContext
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process reads the .dockerignore file at the root of the embedded context.
|
|
||||||
// If .dockerignore does not exist in the context, then nil is returned.
|
|
||||||
//
|
|
||||||
// It can take a list of files to be removed after .dockerignore is removed.
|
|
||||||
// This is used for server-side implementations of builders that need to send
|
|
||||||
// the .dockerignore file as well as the special files specified in filesToRemove,
|
|
||||||
// but expect them to be excluded from the context after they were processed.
|
|
||||||
//
|
|
||||||
// For example, server-side Dockerfile builders are expected to pass in the name
|
|
||||||
// of the Dockerfile to be removed after it was parsed.
|
|
||||||
//
|
|
||||||
// TODO: Don't require a ModifiableContext (use Context instead) and don't remove
|
|
||||||
// files, instead handle a list of files to be excluded from the context.
|
|
||||||
func (c DockerIgnoreContext) Process(filesToRemove []string) error {
|
|
||||||
f, err := c.Open(".dockerignore")
|
|
||||||
// Note that a missing .dockerignore file isn't treated as an error
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
excludes, _ := dockerignore.ReadAll(f)
|
|
||||||
f.Close()
|
|
||||||
filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
|
|
||||||
for _, fileToRemove := range filesToRemove {
|
|
||||||
rm, _ := fileutils.Matches(fileToRemove, excludes)
|
|
||||||
if rm {
|
|
||||||
c.Remove(fileToRemove)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
49
vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go
generated
vendored
49
vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go
generated
vendored
|
@ -1,49 +0,0 @@
|
||||||
package dockerignore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ReadAll reads a .dockerignore file and returns the list of file patterns
|
|
||||||
// to ignore. Note this will trim whitespace from each line as well
|
|
||||||
// as use GO's "clean" func to get the shortest/cleanest path for each.
|
|
||||||
func ReadAll(reader io.Reader) ([]string, error) {
|
|
||||||
if reader == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(reader)
|
|
||||||
var excludes []string
|
|
||||||
currentLine := 0
|
|
||||||
|
|
||||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
|
||||||
for scanner.Scan() {
|
|
||||||
scannedBytes := scanner.Bytes()
|
|
||||||
// We trim UTF8 BOM
|
|
||||||
if currentLine == 0 {
|
|
||||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
|
||||||
}
|
|
||||||
pattern := string(scannedBytes)
|
|
||||||
currentLine++
|
|
||||||
// Lines starting with # (comments) are ignored before processing
|
|
||||||
if strings.HasPrefix(pattern, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
pattern = strings.TrimSpace(pattern)
|
|
||||||
if pattern == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
pattern = filepath.Clean(pattern)
|
|
||||||
pattern = filepath.ToSlash(pattern)
|
|
||||||
excludes = append(excludes, pattern)
|
|
||||||
}
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return nil, fmt.Errorf("Error reading .dockerignore: %v", err)
|
|
||||||
}
|
|
||||||
return excludes, nil
|
|
||||||
}
|
|
28
vendor/github.com/docker/docker/builder/git.go
generated
vendored
28
vendor/github.com/docker/docker/builder/git.go
generated
vendored
|
@ -1,28 +0,0 @@
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/gitutils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
|
|
||||||
func MakeGitContext(gitURL string) (ModifiableContext, error) {
|
|
||||||
root, err := gitutils.Clone(gitURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := archive.Tar(root, archive.Uncompressed)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
// TODO: print errors?
|
|
||||||
c.Close()
|
|
||||||
os.RemoveAll(root)
|
|
||||||
}()
|
|
||||||
return MakeTarSumContext(c)
|
|
||||||
}
|
|
157
vendor/github.com/docker/docker/builder/remote.go
generated
vendored
157
vendor/github.com/docker/docker/builder/remote.go
generated
vendored
|
@ -1,157 +0,0 @@
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/httputils"
|
|
||||||
"github.com/docker/docker/pkg/urlutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// When downloading remote contexts, limit the amount (in bytes)
|
|
||||||
// to be read from the response body in order to detect its Content-Type
|
|
||||||
const maxPreambleLength = 100
|
|
||||||
|
|
||||||
const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))`
|
|
||||||
|
|
||||||
var mimeRe = regexp.MustCompile(acceptableRemoteMIME)
|
|
||||||
|
|
||||||
// MakeRemoteContext downloads a context from remoteURL and returns it.
|
|
||||||
//
|
|
||||||
// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of
|
|
||||||
// maxPreambleLength bytes from the body to help detecting the MIME type.
|
|
||||||
// Look at acceptableRemoteMIME for more details.
|
|
||||||
//
|
|
||||||
// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected
|
|
||||||
// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not).
|
|
||||||
// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned.
|
|
||||||
func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) {
|
|
||||||
f, err := httputils.Download(remoteURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err)
|
|
||||||
}
|
|
||||||
defer f.Body.Close()
|
|
||||||
|
|
||||||
var contextReader io.ReadCloser
|
|
||||||
if contentTypeHandlers != nil {
|
|
||||||
contentType := f.Header.Get("Content-Type")
|
|
||||||
clen := f.ContentLength
|
|
||||||
|
|
||||||
contentType, contextReader, err = inspectResponse(contentType, f.Body, clen)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err)
|
|
||||||
}
|
|
||||||
defer contextReader.Close()
|
|
||||||
|
|
||||||
// This loop tries to find a content-type handler for the detected content-type.
|
|
||||||
// If it could not find one from the caller-supplied map, it tries the empty content-type `""`
|
|
||||||
// which is interpreted as a fallback handler (usually used for raw tar contexts).
|
|
||||||
for _, ct := range []string{contentType, ""} {
|
|
||||||
if fn, ok := contentTypeHandlers[ct]; ok {
|
|
||||||
defer contextReader.Close()
|
|
||||||
if contextReader, err = fn(contextReader); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pass through - this is a pre-packaged context, presumably
|
|
||||||
// with a Dockerfile with the right name inside it.
|
|
||||||
return MakeTarSumContext(contextReader)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
|
|
||||||
// irrespective of user input.
|
|
||||||
// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
|
|
||||||
func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) {
|
|
||||||
switch {
|
|
||||||
case remoteURL == "":
|
|
||||||
context, err = MakeTarSumContext(r)
|
|
||||||
case urlutil.IsGitURL(remoteURL):
|
|
||||||
context, err = MakeGitContext(remoteURL)
|
|
||||||
case urlutil.IsURL(remoteURL):
|
|
||||||
context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
|
|
||||||
httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
|
|
||||||
dockerfile, err := ioutil.ReadAll(rc)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
|
|
||||||
// should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
|
|
||||||
dockerfileName = DefaultDockerfileName
|
|
||||||
|
|
||||||
// TODO: return a context without tarsum
|
|
||||||
r, err := archive.Generate(dockerfileName, string(dockerfile))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ioutil.NopCloser(r), nil
|
|
||||||
},
|
|
||||||
// fallback handler (tar context)
|
|
||||||
"": func(rc io.ReadCloser) (io.ReadCloser, error) {
|
|
||||||
return createProgressReader(rc), nil
|
|
||||||
},
|
|
||||||
})
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// inspectResponse looks into the http response data at r to determine whether its
|
|
||||||
// content-type is on the list of acceptable content types for remote build contexts.
|
|
||||||
// This function returns:
|
|
||||||
// - a string representation of the detected content-type
|
|
||||||
// - an io.Reader for the response body
|
|
||||||
// - an error value which will be non-nil either when something goes wrong while
|
|
||||||
// reading bytes from r or when the detected content-type is not acceptable.
|
|
||||||
func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
|
|
||||||
plen := clen
|
|
||||||
if plen <= 0 || plen > maxPreambleLength {
|
|
||||||
plen = maxPreambleLength
|
|
||||||
}
|
|
||||||
|
|
||||||
preamble := make([]byte, plen, plen)
|
|
||||||
rlen, err := r.Read(preamble)
|
|
||||||
if rlen == 0 {
|
|
||||||
return ct, r, errors.New("empty response")
|
|
||||||
}
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return ct, r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
preambleR := bytes.NewReader(preamble)
|
|
||||||
bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r))
|
|
||||||
// Some web servers will use application/octet-stream as the default
|
|
||||||
// content type for files without an extension (e.g. 'Dockerfile')
|
|
||||||
// so if we receive this value we better check for text content
|
|
||||||
contentType := ct
|
|
||||||
if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
|
|
||||||
contentType, _, err = httputils.DetectContentType(preamble)
|
|
||||||
if err != nil {
|
|
||||||
return contentType, bodyReader, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
contentType = selectAcceptableMIME(contentType)
|
|
||||||
var cterr error
|
|
||||||
if len(contentType) == 0 {
|
|
||||||
cterr = fmt.Errorf("unsupported Content-Type %q", ct)
|
|
||||||
contentType = ct
|
|
||||||
}
|
|
||||||
|
|
||||||
return contentType, bodyReader, cterr
|
|
||||||
}
|
|
||||||
|
|
||||||
func selectAcceptableMIME(ct string) string {
|
|
||||||
return mimeRe.FindString(ct)
|
|
||||||
}
|
|
158
vendor/github.com/docker/docker/builder/tarsum.go
generated
vendored
158
vendor/github.com/docker/docker/builder/tarsum.go
generated
vendored
|
@ -1,158 +0,0 @@
|
||||||
package builder
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/chrootarchive"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
|
||||||
"github.com/docker/docker/pkg/symlink"
|
|
||||||
"github.com/docker/docker/pkg/tarsum"
|
|
||||||
)
|
|
||||||
|
|
||||||
type tarSumContext struct {
|
|
||||||
root string
|
|
||||||
sums tarsum.FileInfoSums
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *tarSumContext) Close() error {
|
|
||||||
return os.RemoveAll(c.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertPathError(err error, cleanpath string) error {
|
|
||||||
if err, ok := err.(*os.PathError); ok {
|
|
||||||
err.Path = cleanpath
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *tarSumContext) Open(path string) (io.ReadCloser, error) {
|
|
||||||
cleanpath, fullpath, err := c.normalize(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r, err := os.Open(fullpath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, convertPathError(err, cleanpath)
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *tarSumContext) Stat(path string) (string, FileInfo, error) {
|
|
||||||
cleanpath, fullpath, err := c.normalize(path)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
st, err := os.Lstat(fullpath)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, convertPathError(err, cleanpath)
|
|
||||||
}
|
|
||||||
|
|
||||||
rel, err := filepath.Rel(c.root, fullpath)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, convertPathError(err, cleanpath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We set sum to path by default for the case where GetFile returns nil.
|
|
||||||
// The usual case is if relative path is empty.
|
|
||||||
sum := path
|
|
||||||
// Use the checksum of the followed path(not the possible symlink) because
|
|
||||||
// this is the file that is actually copied.
|
|
||||||
if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
|
|
||||||
sum = tsInfo.Sum()
|
|
||||||
}
|
|
||||||
fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum}
|
|
||||||
return rel, fi, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MakeTarSumContext returns a build Context from a tar stream.
|
|
||||||
//
|
|
||||||
// It extracts the tar stream to a temporary folder that is deleted as soon as
|
|
||||||
// the Context is closed.
|
|
||||||
// As the extraction happens, a tarsum is calculated for every file, and the set of
|
|
||||||
// all those sums then becomes the source of truth for all operations on this Context.
|
|
||||||
//
|
|
||||||
// Closing tarStream has to be done by the caller.
|
|
||||||
func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) {
|
|
||||||
root, err := ioutils.TempDir("", "docker-builder")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
tsc := &tarSumContext{root: root}
|
|
||||||
|
|
||||||
// Make sure we clean-up upon error. In the happy case the caller
|
|
||||||
// is expected to manage the clean-up
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
tsc.Close()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
decompressedStream, err := archive.DecompressStream(tarStream)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := chrootarchive.Untar(sum, root, nil); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
tsc.sums = sum.GetSums()
|
|
||||||
|
|
||||||
return tsc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) {
|
|
||||||
cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
|
|
||||||
fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath)
|
|
||||||
}
|
|
||||||
_, err = os.Lstat(fullpath)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", convertPathError(err, path)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error {
|
|
||||||
root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root))
|
|
||||||
return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error {
|
|
||||||
rel, err := filepath.Rel(c.root, fullpath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if rel == "." {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
sum := rel
|
|
||||||
if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
|
|
||||||
sum = tsInfo.Sum()
|
|
||||||
}
|
|
||||||
fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum}
|
|
||||||
if err := walkFn(rel, fi, nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *tarSumContext) Remove(path string) error {
|
|
||||||
_, fullpath, err := c.normalize(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return os.RemoveAll(fullpath)
|
|
||||||
}
|
|
120
vendor/github.com/docker/docker/cliconfig/config.go
generated
vendored
120
vendor/github.com/docker/docker/cliconfig/config.go
generated
vendored
|
@ -1,120 +0,0 @@
|
||||||
package cliconfig
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/cliconfig/configfile"
|
|
||||||
"github.com/docker/docker/pkg/homedir"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ConfigFileName is the name of config file
|
|
||||||
ConfigFileName = "config.json"
|
|
||||||
configFileDir = ".docker"
|
|
||||||
oldConfigfile = ".dockercfg"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
configDir = os.Getenv("DOCKER_CONFIG")
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
if configDir == "" {
|
|
||||||
configDir = filepath.Join(homedir.Get(), configFileDir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigDir returns the directory the configuration file is stored in
|
|
||||||
func ConfigDir() string {
|
|
||||||
return configDir
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetConfigDir sets the directory the configuration file is stored in
|
|
||||||
func SetConfigDir(dir string) {
|
|
||||||
configDir = dir
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfigFile initializes an empty configuration file for the given filename 'fn'
|
|
||||||
func NewConfigFile(fn string) *configfile.ConfigFile {
|
|
||||||
return &configfile.ConfigFile{
|
|
||||||
AuthConfigs: make(map[string]types.AuthConfig),
|
|
||||||
HTTPHeaders: make(map[string]string),
|
|
||||||
Filename: fn,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
|
||||||
// a non-nested reader
|
|
||||||
func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
|
||||||
configFile := configfile.ConfigFile{
|
|
||||||
AuthConfigs: make(map[string]types.AuthConfig),
|
|
||||||
}
|
|
||||||
err := configFile.LegacyLoadFromReader(configData)
|
|
||||||
return &configFile, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadFromReader is a convenience function that creates a ConfigFile object from
|
|
||||||
// a reader
|
|
||||||
func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
|
||||||
configFile := configfile.ConfigFile{
|
|
||||||
AuthConfigs: make(map[string]types.AuthConfig),
|
|
||||||
}
|
|
||||||
err := configFile.LoadFromReader(configData)
|
|
||||||
return &configFile, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load reads the configuration files in the given directory, and sets up
|
|
||||||
// the auth config information and returns values.
|
|
||||||
// FIXME: use the internal golang config parser
|
|
||||||
func Load(configDir string) (*configfile.ConfigFile, error) {
|
|
||||||
if configDir == "" {
|
|
||||||
configDir = ConfigDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
configFile := configfile.ConfigFile{
|
|
||||||
AuthConfigs: make(map[string]types.AuthConfig),
|
|
||||||
Filename: filepath.Join(configDir, ConfigFileName),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try happy path first - latest config file
|
|
||||||
if _, err := os.Stat(configFile.Filename); err == nil {
|
|
||||||
file, err := os.Open(configFile.Filename)
|
|
||||||
if err != nil {
|
|
||||||
return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
err = configFile.LoadFromReader(file)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("%s - %v", configFile.Filename, err)
|
|
||||||
}
|
|
||||||
return &configFile, err
|
|
||||||
} else if !os.IsNotExist(err) {
|
|
||||||
// if file is there but we can't stat it for any reason other
|
|
||||||
// than it doesn't exist then stop
|
|
||||||
return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Can't find latest config file so check for the old one
|
|
||||||
confFile := filepath.Join(homedir.Get(), oldConfigfile)
|
|
||||||
if _, err := os.Stat(confFile); err != nil {
|
|
||||||
return &configFile, nil //missing file is not an error
|
|
||||||
}
|
|
||||||
file, err := os.Open(confFile)
|
|
||||||
if err != nil {
|
|
||||||
return &configFile, fmt.Errorf("%s - %v", confFile, err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
err = configFile.LegacyLoadFromReader(file)
|
|
||||||
if err != nil {
|
|
||||||
return &configFile, fmt.Errorf("%s - %v", confFile, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if configFile.HTTPHeaders == nil {
|
|
||||||
configFile.HTTPHeaders = map[string]string{}
|
|
||||||
}
|
|
||||||
return &configFile, nil
|
|
||||||
}
|
|
183
vendor/github.com/docker/docker/cliconfig/configfile/file.go
generated
vendored
183
vendor/github.com/docker/docker/cliconfig/configfile/file.go
generated
vendored
|
@ -1,183 +0,0 @@
|
||||||
package configfile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// This constant is only used for really old config files when the
|
|
||||||
// URL wasn't saved as part of the config file and it was just
|
|
||||||
// assumed to be this value.
|
|
||||||
defaultIndexserver = "https://index.docker.io/v1/"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConfigFile ~/.docker/config.json file info
|
|
||||||
type ConfigFile struct {
|
|
||||||
AuthConfigs map[string]types.AuthConfig `json:"auths"`
|
|
||||||
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
|
||||||
PsFormat string `json:"psFormat,omitempty"`
|
|
||||||
ImagesFormat string `json:"imagesFormat,omitempty"`
|
|
||||||
NetworksFormat string `json:"networksFormat,omitempty"`
|
|
||||||
VolumesFormat string `json:"volumesFormat,omitempty"`
|
|
||||||
StatsFormat string `json:"statsFormat,omitempty"`
|
|
||||||
DetachKeys string `json:"detachKeys,omitempty"`
|
|
||||||
CredentialsStore string `json:"credsStore,omitempty"`
|
|
||||||
CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
|
|
||||||
Filename string `json:"-"` // Note: for internal use only
|
|
||||||
ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
|
|
||||||
// auth config information with given directory and populates the receiver object
|
|
||||||
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
|
|
||||||
b, err := ioutil.ReadAll(configData)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
|
|
||||||
arr := strings.Split(string(b), "\n")
|
|
||||||
if len(arr) < 2 {
|
|
||||||
return fmt.Errorf("The Auth config file is empty")
|
|
||||||
}
|
|
||||||
authConfig := types.AuthConfig{}
|
|
||||||
origAuth := strings.Split(arr[0], " = ")
|
|
||||||
if len(origAuth) != 2 {
|
|
||||||
return fmt.Errorf("Invalid Auth config file")
|
|
||||||
}
|
|
||||||
authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
authConfig.ServerAddress = defaultIndexserver
|
|
||||||
configFile.AuthConfigs[defaultIndexserver] = authConfig
|
|
||||||
} else {
|
|
||||||
for k, authConfig := range configFile.AuthConfigs {
|
|
||||||
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
authConfig.Auth = ""
|
|
||||||
authConfig.ServerAddress = k
|
|
||||||
configFile.AuthConfigs[k] = authConfig
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadFromReader reads the configuration data given and sets up the auth config
|
|
||||||
// information with given directory and populates the receiver object
|
|
||||||
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
|
|
||||||
if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
for addr, ac := range configFile.AuthConfigs {
|
|
||||||
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ac.Auth = ""
|
|
||||||
ac.ServerAddress = addr
|
|
||||||
configFile.AuthConfigs[addr] = ac
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainsAuth returns whether there is authentication configured
|
|
||||||
// in this file or not.
|
|
||||||
func (configFile *ConfigFile) ContainsAuth() bool {
|
|
||||||
return configFile.CredentialsStore != "" ||
|
|
||||||
len(configFile.CredentialHelpers) > 0 ||
|
|
||||||
len(configFile.AuthConfigs) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveToWriter encodes and writes out all the authorization information to
|
|
||||||
// the given writer
|
|
||||||
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
|
|
||||||
// Encode sensitive data into a new/temp struct
|
|
||||||
tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
|
|
||||||
for k, authConfig := range configFile.AuthConfigs {
|
|
||||||
authCopy := authConfig
|
|
||||||
// encode and save the authstring, while blanking out the original fields
|
|
||||||
authCopy.Auth = encodeAuth(&authCopy)
|
|
||||||
authCopy.Username = ""
|
|
||||||
authCopy.Password = ""
|
|
||||||
authCopy.ServerAddress = ""
|
|
||||||
tmpAuthConfigs[k] = authCopy
|
|
||||||
}
|
|
||||||
|
|
||||||
saveAuthConfigs := configFile.AuthConfigs
|
|
||||||
configFile.AuthConfigs = tmpAuthConfigs
|
|
||||||
defer func() { configFile.AuthConfigs = saveAuthConfigs }()
|
|
||||||
|
|
||||||
data, err := json.MarshalIndent(configFile, "", "\t")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = writer.Write(data)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save encodes and writes out all the authorization information
|
|
||||||
func (configFile *ConfigFile) Save() error {
|
|
||||||
if configFile.Filename == "" {
|
|
||||||
return fmt.Errorf("Can't save config with empty filename")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
return configFile.SaveToWriter(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodeAuth creates a base64 encoded string to containing authorization information
|
|
||||||
func encodeAuth(authConfig *types.AuthConfig) string {
|
|
||||||
if authConfig.Username == "" && authConfig.Password == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
authStr := authConfig.Username + ":" + authConfig.Password
|
|
||||||
msg := []byte(authStr)
|
|
||||||
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
|
|
||||||
base64.StdEncoding.Encode(encoded, msg)
|
|
||||||
return string(encoded)
|
|
||||||
}
|
|
||||||
|
|
||||||
// decodeAuth decodes a base64 encoded string and returns username and password
|
|
||||||
func decodeAuth(authStr string) (string, string, error) {
|
|
||||||
if authStr == "" {
|
|
||||||
return "", "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
decLen := base64.StdEncoding.DecodedLen(len(authStr))
|
|
||||||
decoded := make([]byte, decLen)
|
|
||||||
authByte := []byte(authStr)
|
|
||||||
n, err := base64.StdEncoding.Decode(decoded, authByte)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
if n > decLen {
|
|
||||||
return "", "", fmt.Errorf("Something went wrong decoding auth config")
|
|
||||||
}
|
|
||||||
arr := strings.SplitN(string(decoded), ":", 2)
|
|
||||||
if len(arr) != 2 {
|
|
||||||
return "", "", fmt.Errorf("Invalid auth configuration file")
|
|
||||||
}
|
|
||||||
password := strings.Trim(arr[1], "\x00")
|
|
||||||
return arr[0], password, nil
|
|
||||||
}
|
|
67
vendor/github.com/docker/docker/daemon/graphdriver/counter.go
generated
vendored
67
vendor/github.com/docker/docker/daemon/graphdriver/counter.go
generated
vendored
|
@ -1,67 +0,0 @@
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
type minfo struct {
|
|
||||||
check bool
|
|
||||||
count int
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefCounter is a generic counter for use by graphdriver Get/Put calls
|
|
||||||
type RefCounter struct {
|
|
||||||
counts map[string]*minfo
|
|
||||||
mu sync.Mutex
|
|
||||||
checker Checker
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRefCounter returns a new RefCounter
|
|
||||||
func NewRefCounter(c Checker) *RefCounter {
|
|
||||||
return &RefCounter{
|
|
||||||
checker: c,
|
|
||||||
counts: make(map[string]*minfo),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Increment increaes the ref count for the given id and returns the current count
|
|
||||||
func (c *RefCounter) Increment(path string) int {
|
|
||||||
c.mu.Lock()
|
|
||||||
m := c.counts[path]
|
|
||||||
if m == nil {
|
|
||||||
m = &minfo{}
|
|
||||||
c.counts[path] = m
|
|
||||||
}
|
|
||||||
// if we are checking this path for the first time check to make sure
|
|
||||||
// if it was already mounted on the system and make sure we have a correct ref
|
|
||||||
// count if it is mounted as it is in use.
|
|
||||||
if !m.check {
|
|
||||||
m.check = true
|
|
||||||
if c.checker.IsMounted(path) {
|
|
||||||
m.count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.count++
|
|
||||||
c.mu.Unlock()
|
|
||||||
return m.count
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrement decreases the ref count for the given id and returns the current count
|
|
||||||
func (c *RefCounter) Decrement(path string) int {
|
|
||||||
c.mu.Lock()
|
|
||||||
m := c.counts[path]
|
|
||||||
if m == nil {
|
|
||||||
m = &minfo{}
|
|
||||||
c.counts[path] = m
|
|
||||||
}
|
|
||||||
// if we are checking this path for the first time check to make sure
|
|
||||||
// if it was already mounted on the system and make sure we have a correct ref
|
|
||||||
// count if it is mounted as it is in use.
|
|
||||||
if !m.check {
|
|
||||||
m.check = true
|
|
||||||
if c.checker.IsMounted(path) {
|
|
||||||
m.count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m.count--
|
|
||||||
c.mu.Unlock()
|
|
||||||
return m.count
|
|
||||||
}
|
|
270
vendor/github.com/docker/docker/daemon/graphdriver/driver.go
generated
vendored
270
vendor/github.com/docker/docker/daemon/graphdriver/driver.go
generated
vendored
|
@ -1,270 +0,0 @@
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/vbatts/tar-split/tar/storage"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/idtools"
|
|
||||||
"github.com/docker/docker/pkg/plugingetter"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FsMagic unsigned id of the filesystem in use.
|
|
||||||
type FsMagic uint32
|
|
||||||
|
|
||||||
const (
|
|
||||||
// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
|
|
||||||
FsMagicUnsupported = FsMagic(0x00000000)
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// All registered drivers
|
|
||||||
drivers map[string]InitFunc
|
|
||||||
|
|
||||||
// ErrNotSupported returned when driver is not supported.
|
|
||||||
ErrNotSupported = errors.New("driver not supported")
|
|
||||||
// ErrPrerequisites retuned when driver does not meet prerequisites.
|
|
||||||
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
|
|
||||||
// ErrIncompatibleFS returned when file system is not supported.
|
|
||||||
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
|
|
||||||
)
|
|
||||||
|
|
||||||
//CreateOpts contains optional arguments for Create() and CreateReadWrite()
|
|
||||||
// methods.
|
|
||||||
type CreateOpts struct {
|
|
||||||
MountLabel string
|
|
||||||
StorageOpt map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitFunc initializes the storage driver.
|
|
||||||
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
|
|
||||||
|
|
||||||
// ProtoDriver defines the basic capabilities of a driver.
|
|
||||||
// This interface exists solely to be a minimum set of methods
|
|
||||||
// for client code which choose not to implement the entire Driver
|
|
||||||
// interface and use the NaiveDiffDriver wrapper constructor.
|
|
||||||
//
|
|
||||||
// Use of ProtoDriver directly by client code is not recommended.
|
|
||||||
type ProtoDriver interface {
|
|
||||||
// String returns a string representation of this driver.
|
|
||||||
String() string
|
|
||||||
// CreateReadWrite creates a new, empty filesystem layer that is ready
|
|
||||||
// to be used as the storage for a container. Additional options can
|
|
||||||
// be passed in opts. parent may be "" and opts may be nil.
|
|
||||||
CreateReadWrite(id, parent string, opts *CreateOpts) error
|
|
||||||
// Create creates a new, empty, filesystem layer with the
|
|
||||||
// specified id and parent and options passed in opts. Parent
|
|
||||||
// may be "" and opts may be nil.
|
|
||||||
Create(id, parent string, opts *CreateOpts) error
|
|
||||||
// Remove attempts to remove the filesystem layer with this id.
|
|
||||||
Remove(id string) error
|
|
||||||
// Get returns the mountpoint for the layered filesystem referred
|
|
||||||
// to by this id. You can optionally specify a mountLabel or "".
|
|
||||||
// Returns the absolute path to the mounted layered filesystem.
|
|
||||||
Get(id, mountLabel string) (dir string, err error)
|
|
||||||
// Put releases the system resources for the specified id,
|
|
||||||
// e.g, unmounting layered filesystem.
|
|
||||||
Put(id string) error
|
|
||||||
// Exists returns whether a filesystem layer with the specified
|
|
||||||
// ID exists on this driver.
|
|
||||||
Exists(id string) bool
|
|
||||||
// Status returns a set of key-value pairs which give low
|
|
||||||
// level diagnostic status about this driver.
|
|
||||||
Status() [][2]string
|
|
||||||
// Returns a set of key-value pairs which give low level information
|
|
||||||
// about the image/container driver is managing.
|
|
||||||
GetMetadata(id string) (map[string]string, error)
|
|
||||||
// Cleanup performs necessary tasks to release resources
|
|
||||||
// held by the driver, e.g., unmounting all layered filesystems
|
|
||||||
// known to this driver.
|
|
||||||
Cleanup() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// DiffDriver is the interface to use to implement graph diffs
|
|
||||||
type DiffDriver interface {
|
|
||||||
// Diff produces an archive of the changes between the specified
|
|
||||||
// layer and its parent layer which may be "".
|
|
||||||
Diff(id, parent string) (io.ReadCloser, error)
|
|
||||||
// Changes produces a list of changes between the specified layer
|
|
||||||
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
|
||||||
Changes(id, parent string) ([]archive.Change, error)
|
|
||||||
// ApplyDiff extracts the changeset from the given diff into the
|
|
||||||
// layer with the specified id and parent, returning the size of the
|
|
||||||
// new layer in bytes.
|
|
||||||
// The archive.Reader must be an uncompressed stream.
|
|
||||||
ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
|
|
||||||
// DiffSize calculates the changes between the specified id
|
|
||||||
// and its parent and returns the size in bytes of the changes
|
|
||||||
// relative to its base filesystem directory.
|
|
||||||
DiffSize(id, parent string) (size int64, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Driver is the interface for layered/snapshot file system drivers.
|
|
||||||
type Driver interface {
|
|
||||||
ProtoDriver
|
|
||||||
DiffDriver
|
|
||||||
}
|
|
||||||
|
|
||||||
// DiffGetterDriver is the interface for layered file system drivers that
|
|
||||||
// provide a specialized function for getting file contents for tar-split.
|
|
||||||
type DiffGetterDriver interface {
|
|
||||||
Driver
|
|
||||||
// DiffGetter returns an interface to efficiently retrieve the contents
|
|
||||||
// of files in a layer.
|
|
||||||
DiffGetter(id string) (FileGetCloser, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileGetCloser extends the storage.FileGetter interface with a Close method
|
|
||||||
// for cleaning up.
|
|
||||||
type FileGetCloser interface {
|
|
||||||
storage.FileGetter
|
|
||||||
// Close cleans up any resources associated with the FileGetCloser.
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checker makes checks on specified filesystems.
|
|
||||||
type Checker interface {
|
|
||||||
// IsMounted returns true if the provided path is mounted for the specific checker
|
|
||||||
IsMounted(path string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
drivers = make(map[string]InitFunc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register registers an InitFunc for the driver.
|
|
||||||
func Register(name string, initFunc InitFunc) error {
|
|
||||||
if _, exists := drivers[name]; exists {
|
|
||||||
return fmt.Errorf("Name already registered %s", name)
|
|
||||||
}
|
|
||||||
drivers[name] = initFunc
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDriver initializes and returns the registered driver
|
|
||||||
func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
|
|
||||||
if initFunc, exists := drivers[name]; exists {
|
|
||||||
return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
|
||||||
}
|
|
||||||
|
|
||||||
pluginDriver, err := lookupPlugin(name, pg, config)
|
|
||||||
if err == nil {
|
|
||||||
return pluginDriver, nil
|
|
||||||
}
|
|
||||||
logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph")
|
|
||||||
return nil, ErrNotSupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
|
|
||||||
func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
|
|
||||||
if initFunc, exists := drivers[name]; exists {
|
|
||||||
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
|
|
||||||
}
|
|
||||||
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
|
|
||||||
return nil, ErrNotSupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options is used to initialize a graphdriver
|
|
||||||
type Options struct {
|
|
||||||
Root string
|
|
||||||
DriverOptions []string
|
|
||||||
UIDMaps []idtools.IDMap
|
|
||||||
GIDMaps []idtools.IDMap
|
|
||||||
ExperimentalEnabled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates the driver and initializes it at the specified root.
|
|
||||||
func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
|
|
||||||
if name != "" {
|
|
||||||
logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver
|
|
||||||
return GetDriver(name, pg, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Guess for prior driver
|
|
||||||
driversMap := scanPriorDrivers(config.Root)
|
|
||||||
for _, name := range priority {
|
|
||||||
if name == "vfs" {
|
|
||||||
// don't use vfs even if there is state present.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, prior := driversMap[name]; prior {
|
|
||||||
// of the state found from prior drivers, check in order of our priority
|
|
||||||
// which we would prefer
|
|
||||||
driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
// unlike below, we will return error here, because there is prior
|
|
||||||
// state, and now it is no longer supported/prereq/compatible, so
|
|
||||||
// something changed and needs attention. Otherwise the daemon's
|
|
||||||
// images would just "disappear".
|
|
||||||
logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// abort starting when there are other prior configured drivers
|
|
||||||
// to ensure the user explicitly selects the driver to load
|
|
||||||
if len(driversMap)-1 > 0 {
|
|
||||||
var driversSlice []string
|
|
||||||
for name := range driversMap {
|
|
||||||
driversSlice = append(driversSlice, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
logrus.Infof("[graphdriver] using prior storage driver: %s", name)
|
|
||||||
return driver, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for priority drivers first
|
|
||||||
for _, name := range priority {
|
|
||||||
driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
if isDriverNotSupported(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return driver, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check all registered drivers if no priority driver is found
|
|
||||||
for name, initFunc := range drivers {
|
|
||||||
driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
if isDriverNotSupported(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return driver, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("No supported storage backend found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// isDriverNotSupported returns true if the error initializing
|
|
||||||
// the graph driver is a non-supported error.
|
|
||||||
func isDriverNotSupported(err error) bool {
|
|
||||||
return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
|
|
||||||
func scanPriorDrivers(root string) map[string]bool {
|
|
||||||
driversMap := make(map[string]bool)
|
|
||||||
|
|
||||||
for driver := range drivers {
|
|
||||||
p := filepath.Join(root, driver)
|
|
||||||
if _, err := os.Stat(p); err == nil && driver != "vfs" {
|
|
||||||
driversMap[driver] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return driversMap
|
|
||||||
}
|
|
19
vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go
generated
vendored
19
vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go
generated
vendored
|
@ -1,19 +0,0 @@
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
import "syscall"
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Slice of drivers that should be used in an order
|
|
||||||
priority = []string{
|
|
||||||
"zfs",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// Mounted checks if the given path is mounted as the fs type
|
|
||||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
|
||||||
var buf syscall.Statfs_t
|
|
||||||
if err := syscall.Statfs(mountPath, &buf); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
return FsMagic(buf.Type) == fsType, nil
|
|
||||||
}
|
|
135
vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
generated
vendored
135
vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
generated
vendored
|
@ -1,135 +0,0 @@
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/mount"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// FsMagicAufs filesystem id for Aufs
|
|
||||||
FsMagicAufs = FsMagic(0x61756673)
|
|
||||||
// FsMagicBtrfs filesystem id for Btrfs
|
|
||||||
FsMagicBtrfs = FsMagic(0x9123683E)
|
|
||||||
// FsMagicCramfs filesystem id for Cramfs
|
|
||||||
FsMagicCramfs = FsMagic(0x28cd3d45)
|
|
||||||
// FsMagicEcryptfs filesystem id for eCryptfs
|
|
||||||
FsMagicEcryptfs = FsMagic(0xf15f)
|
|
||||||
// FsMagicExtfs filesystem id for Extfs
|
|
||||||
FsMagicExtfs = FsMagic(0x0000EF53)
|
|
||||||
// FsMagicF2fs filesystem id for F2fs
|
|
||||||
FsMagicF2fs = FsMagic(0xF2F52010)
|
|
||||||
// FsMagicGPFS filesystem id for GPFS
|
|
||||||
FsMagicGPFS = FsMagic(0x47504653)
|
|
||||||
// FsMagicJffs2Fs filesystem if for Jffs2Fs
|
|
||||||
FsMagicJffs2Fs = FsMagic(0x000072b6)
|
|
||||||
// FsMagicJfs filesystem id for Jfs
|
|
||||||
FsMagicJfs = FsMagic(0x3153464a)
|
|
||||||
// FsMagicNfsFs filesystem id for NfsFs
|
|
||||||
FsMagicNfsFs = FsMagic(0x00006969)
|
|
||||||
// FsMagicRAMFs filesystem id for RamFs
|
|
||||||
FsMagicRAMFs = FsMagic(0x858458f6)
|
|
||||||
// FsMagicReiserFs filesystem id for ReiserFs
|
|
||||||
FsMagicReiserFs = FsMagic(0x52654973)
|
|
||||||
// FsMagicSmbFs filesystem id for SmbFs
|
|
||||||
FsMagicSmbFs = FsMagic(0x0000517B)
|
|
||||||
// FsMagicSquashFs filesystem id for SquashFs
|
|
||||||
FsMagicSquashFs = FsMagic(0x73717368)
|
|
||||||
// FsMagicTmpFs filesystem id for TmpFs
|
|
||||||
FsMagicTmpFs = FsMagic(0x01021994)
|
|
||||||
// FsMagicVxFS filesystem id for VxFs
|
|
||||||
FsMagicVxFS = FsMagic(0xa501fcf5)
|
|
||||||
// FsMagicXfs filesystem id for Xfs
|
|
||||||
FsMagicXfs = FsMagic(0x58465342)
|
|
||||||
// FsMagicZfs filesystem id for Zfs
|
|
||||||
FsMagicZfs = FsMagic(0x2fc12fc1)
|
|
||||||
// FsMagicOverlay filesystem id for overlay
|
|
||||||
FsMagicOverlay = FsMagic(0x794C7630)
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Slice of drivers that should be used in an order
|
|
||||||
priority = []string{
|
|
||||||
"aufs",
|
|
||||||
"btrfs",
|
|
||||||
"zfs",
|
|
||||||
"overlay2",
|
|
||||||
"overlay",
|
|
||||||
"devicemapper",
|
|
||||||
"vfs",
|
|
||||||
}
|
|
||||||
|
|
||||||
// FsNames maps filesystem id to name of the filesystem.
|
|
||||||
FsNames = map[FsMagic]string{
|
|
||||||
FsMagicAufs: "aufs",
|
|
||||||
FsMagicBtrfs: "btrfs",
|
|
||||||
FsMagicCramfs: "cramfs",
|
|
||||||
FsMagicExtfs: "extfs",
|
|
||||||
FsMagicF2fs: "f2fs",
|
|
||||||
FsMagicGPFS: "gpfs",
|
|
||||||
FsMagicJffs2Fs: "jffs2",
|
|
||||||
FsMagicJfs: "jfs",
|
|
||||||
FsMagicNfsFs: "nfs",
|
|
||||||
FsMagicOverlay: "overlayfs",
|
|
||||||
FsMagicRAMFs: "ramfs",
|
|
||||||
FsMagicReiserFs: "reiserfs",
|
|
||||||
FsMagicSmbFs: "smb",
|
|
||||||
FsMagicSquashFs: "squashfs",
|
|
||||||
FsMagicTmpFs: "tmpfs",
|
|
||||||
FsMagicUnsupported: "unsupported",
|
|
||||||
FsMagicVxFS: "vxfs",
|
|
||||||
FsMagicXfs: "xfs",
|
|
||||||
FsMagicZfs: "zfs",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetFSMagic returns the filesystem id given the path.
|
|
||||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
|
||||||
var buf syscall.Statfs_t
|
|
||||||
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return FsMagic(buf.Type), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFsChecker returns a checker configured for the provied FsMagic
|
|
||||||
func NewFsChecker(t FsMagic) Checker {
|
|
||||||
return &fsChecker{
|
|
||||||
t: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type fsChecker struct {
|
|
||||||
t FsMagic
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fsChecker) IsMounted(path string) bool {
|
|
||||||
m, _ := Mounted(c.t, path)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDefaultChecker returns a check that parses /proc/mountinfo to check
|
|
||||||
// if the specified path is mounted.
|
|
||||||
func NewDefaultChecker() Checker {
|
|
||||||
return &defaultChecker{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type defaultChecker struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *defaultChecker) IsMounted(path string) bool {
|
|
||||||
m, _ := mount.Mounted(path)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mounted checks if the given path is mounted as the fs type
|
|
||||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
|
||||||
var buf syscall.Statfs_t
|
|
||||||
if err := syscall.Statfs(mountPath, &buf); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
return FsMagic(buf.Type) == fsType, nil
|
|
||||||
}
|
|
97
vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
generated
vendored
97
vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
generated
vendored
|
@ -1,97 +0,0 @@
|
||||||
// +build solaris,cgo
|
|
||||||
|
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include <sys/statvfs.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
|
|
||||||
static inline struct statvfs *getstatfs(char *s) {
|
|
||||||
struct statvfs *buf;
|
|
||||||
int err;
|
|
||||||
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
|
|
||||||
err = statvfs(s, buf);
|
|
||||||
return buf;
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/docker/pkg/mount"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// FsMagicZfs filesystem id for Zfs
|
|
||||||
FsMagicZfs = FsMagic(0x2fc12fc1)
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Slice of drivers that should be used in an order
|
|
||||||
priority = []string{
|
|
||||||
"zfs",
|
|
||||||
}
|
|
||||||
|
|
||||||
// FsNames maps filesystem id to name of the filesystem.
|
|
||||||
FsNames = map[FsMagic]string{
|
|
||||||
FsMagicZfs: "zfs",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetFSMagic returns the filesystem id given the path.
|
|
||||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type fsChecker struct {
|
|
||||||
t FsMagic
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fsChecker) IsMounted(path string) bool {
|
|
||||||
m, _ := Mounted(c.t, path)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFsChecker returns a checker configured for the provied FsMagic
|
|
||||||
func NewFsChecker(t FsMagic) Checker {
|
|
||||||
return &fsChecker{
|
|
||||||
t: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDefaultChecker returns a check that parses /proc/mountinfo to check
|
|
||||||
// if the specified path is mounted.
|
|
||||||
// No-op on Solaris.
|
|
||||||
func NewDefaultChecker() Checker {
|
|
||||||
return &defaultChecker{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type defaultChecker struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *defaultChecker) IsMounted(path string) bool {
|
|
||||||
m, _ := mount.Mounted(path)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mounted checks if the given path is mounted as the fs type
|
|
||||||
//Solaris supports only ZFS for now
|
|
||||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
|
||||||
|
|
||||||
cs := C.CString(filepath.Dir(mountPath))
|
|
||||||
buf := C.getstatfs(cs)
|
|
||||||
|
|
||||||
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
|
|
||||||
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
|
|
||||||
(buf.f_basetype[3] != 0) {
|
|
||||||
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
|
|
||||||
C.free(unsafe.Pointer(buf))
|
|
||||||
return false, ErrPrerequisites
|
|
||||||
}
|
|
||||||
|
|
||||||
C.free(unsafe.Pointer(buf))
|
|
||||||
C.free(unsafe.Pointer(cs))
|
|
||||||
return true, nil
|
|
||||||
}
|
|
15
vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
generated
vendored
15
vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
generated
vendored
|
@ -1,15 +0,0 @@
|
||||||
// +build !linux,!windows,!freebsd,!solaris
|
|
||||||
|
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Slice of drivers that should be used in an order
|
|
||||||
priority = []string{
|
|
||||||
"unsupported",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetFSMagic returns the filesystem id given the path.
|
|
||||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
|
||||||
return FsMagicUnsupported, nil
|
|
||||||
}
|
|
14
vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
generated
vendored
14
vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
generated
vendored
|
@ -1,14 +0,0 @@
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Slice of drivers that should be used in order
|
|
||||||
priority = []string{
|
|
||||||
"windowsfilter",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetFSMagic returns the filesystem id given the path.
|
|
||||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
|
||||||
// Note it is OK to return FsMagicUnsupported on Windows.
|
|
||||||
return FsMagicUnsupported, nil
|
|
||||||
}
|
|
169
vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
generated
vendored
169
vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
generated
vendored
|
@ -1,169 +0,0 @@
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/chrootarchive"
|
|
||||||
"github.com/docker/docker/pkg/idtools"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ApplyUncompressedLayer defines the unpack method used by the graph
|
|
||||||
// driver.
|
|
||||||
ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
|
|
||||||
)
|
|
||||||
|
|
||||||
// NaiveDiffDriver takes a ProtoDriver and adds the
|
|
||||||
// capability of the Diffing methods which it may or may not
|
|
||||||
// support on its own. See the comment on the exported
|
|
||||||
// NewNaiveDiffDriver function below.
|
|
||||||
// Notably, the AUFS driver doesn't need to be wrapped like this.
|
|
||||||
type NaiveDiffDriver struct {
|
|
||||||
ProtoDriver
|
|
||||||
uidMaps []idtools.IDMap
|
|
||||||
gidMaps []idtools.IDMap
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNaiveDiffDriver returns a fully functional driver that wraps the
|
|
||||||
// given ProtoDriver and adds the capability of the following methods which
|
|
||||||
// it may or may not support on its own:
|
|
||||||
// Diff(id, parent string) (archive.Archive, error)
|
|
||||||
// Changes(id, parent string) ([]archive.Change, error)
|
|
||||||
// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
|
|
||||||
// DiffSize(id, parent string) (size int64, err error)
|
|
||||||
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
|
|
||||||
return &NaiveDiffDriver{ProtoDriver: driver,
|
|
||||||
uidMaps: uidMaps,
|
|
||||||
gidMaps: gidMaps}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Diff produces an archive of the changes between the specified
|
|
||||||
// layer and its parent layer which may be "".
|
|
||||||
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
|
|
||||||
startTime := time.Now()
|
|
||||||
driver := gdw.ProtoDriver
|
|
||||||
|
|
||||||
layerFs, err := driver.Get(id, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
driver.Put(id)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if parent == "" {
|
|
||||||
archive, err := archive.Tar(layerFs, archive.Uncompressed)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return ioutils.NewReadCloserWrapper(archive, func() error {
|
|
||||||
err := archive.Close()
|
|
||||||
driver.Put(id)
|
|
||||||
return err
|
|
||||||
}), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
parentFs, err := driver.Get(parent, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer driver.Put(parent)
|
|
||||||
|
|
||||||
changes, err := archive.ChangesDirs(layerFs, parentFs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ioutils.NewReadCloserWrapper(archive, func() error {
|
|
||||||
err := archive.Close()
|
|
||||||
driver.Put(id)
|
|
||||||
|
|
||||||
// NaiveDiffDriver compares file metadata with parent layers. Parent layers
|
|
||||||
// are extracted from tar's with full second precision on modified time.
|
|
||||||
// We need this hack here to make sure calls within same second receive
|
|
||||||
// correct result.
|
|
||||||
time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
|
|
||||||
return err
|
|
||||||
}), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Changes produces a list of changes between the specified layer
|
|
||||||
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
|
||||||
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
|
|
||||||
driver := gdw.ProtoDriver
|
|
||||||
|
|
||||||
layerFs, err := driver.Get(id, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer driver.Put(id)
|
|
||||||
|
|
||||||
parentFs := ""
|
|
||||||
|
|
||||||
if parent != "" {
|
|
||||||
parentFs, err = driver.Get(parent, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer driver.Put(parent)
|
|
||||||
}
|
|
||||||
|
|
||||||
return archive.ChangesDirs(layerFs, parentFs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyDiff extracts the changeset from the given diff into the
|
|
||||||
// layer with the specified id and parent, returning the size of the
|
|
||||||
// new layer in bytes.
|
|
||||||
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
|
|
||||||
driver := gdw.ProtoDriver
|
|
||||||
|
|
||||||
// Mount the root filesystem so we can apply the diff/layer.
|
|
||||||
layerFs, err := driver.Get(id, "")
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer driver.Put(id)
|
|
||||||
|
|
||||||
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
|
|
||||||
GIDMaps: gdw.gidMaps}
|
|
||||||
start := time.Now().UTC()
|
|
||||||
logrus.Debug("Start untar layer")
|
|
||||||
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DiffSize calculates the changes between the specified layer
|
|
||||||
// and its parent and returns the size in bytes of the changes
|
|
||||||
// relative to its base filesystem directory.
|
|
||||||
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
|
|
||||||
driver := gdw.ProtoDriver
|
|
||||||
|
|
||||||
changes, err := gdw.Changes(id, parent)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
layerFs, err := driver.Get(id, "")
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer driver.Put(id)
|
|
||||||
|
|
||||||
return archive.ChangesSize(layerFs, changes), nil
|
|
||||||
}
|
|
43
vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
generated
vendored
43
vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
generated
vendored
|
@ -1,43 +0,0 @@
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/plugingetter"
|
|
||||||
"github.com/docker/docker/plugin/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
type pluginClient interface {
|
|
||||||
// Call calls the specified method with the specified arguments for the plugin.
|
|
||||||
Call(string, interface{}, interface{}) error
|
|
||||||
// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
|
|
||||||
Stream(string, interface{}) (io.ReadCloser, error)
|
|
||||||
// SendFile calls the specified method, and passes through the IO stream
|
|
||||||
SendFile(string, io.Reader, interface{}) error
|
|
||||||
}
|
|
||||||
|
|
||||||
func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
|
|
||||||
if !config.ExperimentalEnabled {
|
|
||||||
return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode")
|
|
||||||
}
|
|
||||||
pl, err := pg.Get(name, "GraphDriver", plugingetter.ACQUIRE)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
|
|
||||||
}
|
|
||||||
return newPluginDriver(name, pl, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) {
|
|
||||||
home := config.Root
|
|
||||||
if !pl.IsV1() {
|
|
||||||
if p, ok := pl.(*v2.Plugin); ok {
|
|
||||||
if p.PropagatedMount != "" {
|
|
||||||
home = p.PluginObj.Config.PropagatedMount
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
proxy := &graphDriverProxy{name, pl}
|
|
||||||
return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
|
||||||
}
|
|
252
vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
generated
vendored
252
vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
generated
vendored
|
@ -1,252 +0,0 @@
|
||||||
package graphdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/idtools"
|
|
||||||
"github.com/docker/docker/pkg/plugingetter"
|
|
||||||
)
|
|
||||||
|
|
||||||
type graphDriverProxy struct {
|
|
||||||
name string
|
|
||||||
p plugingetter.CompatPlugin
|
|
||||||
}
|
|
||||||
|
|
||||||
type graphDriverRequest struct {
|
|
||||||
ID string `json:",omitempty"`
|
|
||||||
Parent string `json:",omitempty"`
|
|
||||||
MountLabel string `json:",omitempty"`
|
|
||||||
StorageOpt map[string]string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type graphDriverResponse struct {
|
|
||||||
Err string `json:",omitempty"`
|
|
||||||
Dir string `json:",omitempty"`
|
|
||||||
Exists bool `json:",omitempty"`
|
|
||||||
Status [][2]string `json:",omitempty"`
|
|
||||||
Changes []archive.Change `json:",omitempty"`
|
|
||||||
Size int64 `json:",omitempty"`
|
|
||||||
Metadata map[string]string `json:",omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type graphDriverInitRequest struct {
|
|
||||||
Home string
|
|
||||||
Opts []string `json:"Opts"`
|
|
||||||
UIDMaps []idtools.IDMap `json:"UIDMaps"`
|
|
||||||
GIDMaps []idtools.IDMap `json:"GIDMaps"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error {
|
|
||||||
if !d.p.IsV1() {
|
|
||||||
if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
|
|
||||||
// always acquire here, it will be cleaned up on daemon shutdown
|
|
||||||
cp.Acquire()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
args := &graphDriverInitRequest{
|
|
||||||
Home: home,
|
|
||||||
Opts: opts,
|
|
||||||
UIDMaps: uidMaps,
|
|
||||||
GIDMaps: gidMaps,
|
|
||||||
}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) String() string {
|
|
||||||
return d.name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error {
|
|
||||||
args := &graphDriverRequest{
|
|
||||||
ID: id,
|
|
||||||
Parent: parent,
|
|
||||||
}
|
|
||||||
if opts != nil {
|
|
||||||
args.MountLabel = opts.MountLabel
|
|
||||||
args.StorageOpt = opts.StorageOpt
|
|
||||||
}
|
|
||||||
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
|
|
||||||
args := &graphDriverRequest{
|
|
||||||
ID: id,
|
|
||||||
Parent: parent,
|
|
||||||
}
|
|
||||||
if opts != nil {
|
|
||||||
args.MountLabel = opts.MountLabel
|
|
||||||
args.StorageOpt = opts.StorageOpt
|
|
||||||
}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Remove(id string) error {
|
|
||||||
args := &graphDriverRequest{ID: id}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the host path of layer id's directory: the plugin reports a
// path relative to its own base, so the result is joined with BasePath().
// Note the joined path is returned even when the driver reported an error
// string, alongside that error.
func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
	args := &graphDriverRequest{
		ID:         id,
		MountLabel: mountLabel,
	}
	var ret graphDriverResponse
	if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil {
		return "", err
	}
	var err error
	if ret.Err != "" {
		err = errors.New(ret.Err)
	}
	return filepath.Join(d.p.BasePath(), ret.Dir), err
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Put(id string) error {
|
|
||||||
args := &graphDriverRequest{ID: id}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Exists(id string) bool {
|
|
||||||
args := &graphDriverRequest{ID: id}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return ret.Exists
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Status() [][2]string {
|
|
||||||
args := &graphDriverRequest{}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return ret.Status
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetadata returns driver-specific key/value metadata for the layer id,
// as reported by the plugin.
func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
	args := &graphDriverRequest{
		ID: id,
	}
	var ret graphDriverResponse
	if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil {
		return nil, err
	}
	if ret.Err != "" {
		return nil, errors.New(ret.Err)
	}
	return ret.Metadata, nil
}
|
|
||||||
|
|
||||||
// Cleanup releases the plugin reference taken in Init (non-V1 plugins
// only) and asks the driver to clean up its resources.
func (d *graphDriverProxy) Cleanup() error {
	if !d.p.IsV1() {
		if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
			// always release
			defer cp.Release()
		}
	}

	args := &graphDriverRequest{}
	var ret graphDriverResponse
	if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil {
		// NOTE(review): the transport error is swallowed and nil returned —
		// presumably best-effort during shutdown when the plugin may already
		// be gone; confirm this is intentional rather than `return err`.
		return nil
	}
	if ret.Err != "" {
		return errors.New(ret.Err)
	}
	return nil
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
|
|
||||||
args := &graphDriverRequest{
|
|
||||||
ID: id,
|
|
||||||
Parent: parent,
|
|
||||||
}
|
|
||||||
body, err := d.p.Client().Stream("GraphDriver.Diff", args)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Changes returns the list of filesystem changes between parent and layer
// id, as reported by the plugin.
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
	args := &graphDriverRequest{
		ID:     id,
		Parent: parent,
	}
	var ret graphDriverResponse
	if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil {
		return nil, err
	}
	if ret.Err != "" {
		return nil, errors.New(ret.Err)
	}

	return ret.Changes, nil
}
|
|
||||||
|
|
||||||
// ApplyDiff streams the diff archive to the plugin (id/parent are passed
// as query parameters on the call path) and returns the size reported by
// the driver. Returns -1 with an error on transport or driver failure.
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
	var ret graphDriverResponse
	if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
		return -1, err
	}
	if ret.Err != "" {
		return -1, errors.New(ret.Err)
	}
	return ret.Size, nil
}
|
|
||||||
|
|
||||||
// DiffSize returns the size of the changes between parent and layer id,
// as computed by the plugin. Returns -1 with an error on failure.
func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
	args := &graphDriverRequest{
		ID:     id,
		Parent: parent,
	}
	var ret graphDriverResponse
	if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil {
		return -1, err
	}
	if ret.Err != "" {
		return -1, errors.New(ret.Err)
	}
	return ret.Size, nil
}
|
|
173
vendor/github.com/docker/docker/image/fs.go
generated
vendored
173
vendor/github.com/docker/docker/image/fs.go
generated
vendored
|
@ -1,173 +0,0 @@
|
||||||
package image
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/distribution/digest"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DigestWalkFunc is function called by StoreBackend.Walk
type DigestWalkFunc func(id digest.Digest) error

// StoreBackend provides interface for image.Store persistence
type StoreBackend interface {
	// Walk calls f for every stored image digest.
	Walk(f DigestWalkFunc) error
	// Get returns the content stored under id.
	Get(id digest.Digest) ([]byte, error)
	// Set stores data and returns its content-addressable digest.
	Set(data []byte) (digest.Digest, error)
	// Delete removes the content and all metadata for id.
	Delete(id digest.Digest) error
	// SetMetadata stores data under (id, key); id must already exist.
	SetMetadata(id digest.Digest, key string, data []byte) error
	// GetMetadata returns the metadata stored under (id, key).
	GetMetadata(id digest.Digest, key string) ([]byte, error)
	// DeleteMetadata removes the metadata stored under (id, key).
	DeleteMetadata(id digest.Digest, key string) error
}
|
|
||||||
|
|
||||||
// fs implements StoreBackend using the filesystem.
type fs struct {
	sync.RWMutex        // guards all content/metadata file access
	root         string // base directory holding the content and metadata trees
}

const (
	// contentDirName holds the content-addressed blobs, one file per digest.
	contentDirName = "content"
	// metadataDirName holds per-digest metadata, one directory per digest.
	metadataDirName = "metadata"
)
|
|
||||||
|
|
||||||
// NewFSStoreBackend returns new filesystem based backend for image.Store.
// It is a thin wrapper over newFSStore that hides the concrete *fs type.
func NewFSStoreBackend(root string) (StoreBackend, error) {
	return newFSStore(root)
}
|
|
||||||
|
|
||||||
func newFSStore(root string) (*fs, error) {
|
|
||||||
s := &fs{
|
|
||||||
root: root,
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// contentFile returns the path of the blob for dgst:
// <root>/content/<algorithm>/<hex>.
func (s *fs) contentFile(dgst digest.Digest) string {
	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
}

// metadataDir returns the per-digest metadata directory:
// <root>/metadata/<algorithm>/<hex>.
func (s *fs) metadataDir(dgst digest.Digest) string {
	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
}
|
|
||||||
|
|
||||||
// Walk calls the supplied callback for each image ID in the storage backend.
// Any error returned by the callback aborts the walk.
func (s *fs) Walk(f DigestWalkFunc) error {
	// Only Canonical digest (sha256) is currently supported
	s.RLock()
	dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
	s.RUnlock()
	if err != nil {
		return err
	}
	for _, v := range dir {
		// Each file name is expected to be the hex part of a sha256 digest.
		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
		if err := dgst.Validate(); err != nil {
			// Skip stray files that are not valid digests rather than failing the walk.
			logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
			continue
		}
		if err := f(dgst); err != nil {
			return err
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// Get returns the content stored under a given digest.
func (s *fs) Get(dgst digest.Digest) ([]byte, error) {
	s.RLock()
	defer s.RUnlock()

	return s.get(dgst)
}

// get reads the content file for dgst and verifies it by re-hashing;
// callers must hold at least the read lock.
func (s *fs) get(dgst digest.Digest) ([]byte, error) {
	content, err := ioutil.ReadFile(s.contentFile(dgst))
	if err != nil {
		return nil, err
	}

	// todo: maybe optional
	if digest.FromBytes(content) != dgst {
		return nil, fmt.Errorf("failed to verify: %v", dgst)
	}

	return content, nil
}
|
|
||||||
|
|
||||||
// Set stores content by checksum. The digest of the data is its storage
// key; empty data is rejected. The file is written via AtomicWriteFile so
// a partially written blob is not left behind on failure.
func (s *fs) Set(data []byte) (digest.Digest, error) {
	s.Lock()
	defer s.Unlock()

	if len(data) == 0 {
		return "", fmt.Errorf("Invalid empty data")
	}

	dgst := digest.FromBytes(data)
	if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil {
		return "", err
	}

	return dgst, nil
}
|
|
||||||
|
|
||||||
// Delete removes content and metadata files associated with the digest.
|
|
||||||
func (s *fs) Delete(dgst digest.Digest) error {
|
|
||||||
s.Lock()
|
|
||||||
defer s.Unlock()
|
|
||||||
|
|
||||||
if err := os.RemoveAll(s.metadataDir(dgst)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.Remove(s.contentFile(dgst)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata sets metadata for a given ID. It fails if there's no base file.
func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error {
	s.Lock()
	defer s.Unlock()
	// Verify the content blob exists (and verifies) before attaching metadata.
	if _, err := s.get(dgst); err != nil {
		return err
	}

	baseDir := filepath.Join(s.metadataDir(dgst))
	if err := os.MkdirAll(baseDir, 0700); err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600)
}
|
|
||||||
|
|
||||||
// GetMetadata returns metadata for a given digest. It first re-verifies
// the underlying content blob, then reads the per-key metadata file.
func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) {
	s.RLock()
	defer s.RUnlock()

	if _, err := s.get(dgst); err != nil {
		return nil, err
	}
	return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key))
}
|
|
||||||
|
|
||||||
// DeleteMetadata removes the metadata associated with a digest.
|
|
||||||
func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error {
|
|
||||||
s.Lock()
|
|
||||||
defer s.Unlock()
|
|
||||||
|
|
||||||
return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key))
|
|
||||||
}
|
|
150
vendor/github.com/docker/docker/image/image.go
generated
vendored
150
vendor/github.com/docker/docker/image/image.go
generated
vendored
|
@ -1,150 +0,0 @@
|
||||||
package image
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/digest"
|
|
||||||
"github.com/docker/docker/api/types/container"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ID is the content-addressable ID of an image.
type ID digest.Digest

// String returns the ID in its canonical digest string form.
func (id ID) String() string {
	return id.Digest().String()
}

// Digest converts ID into a digest
func (id ID) Digest() digest.Digest {
	return digest.Digest(id)
}

// IDFromDigest creates an ID from a digest
func IDFromDigest(digest digest.Digest) ID {
	return ID(digest)
}
|
|
||||||
|
|
||||||
// V1Image stores the V1 image configuration.
type V1Image struct {
	// ID a unique 64 character identifier of the image
	ID string `json:"id,omitempty"`
	// Parent id of the image
	Parent string `json:"parent,omitempty"`
	// Comment user added comment
	Comment string `json:"comment,omitempty"`
	// Created timestamp when image was created
	Created time.Time `json:"created"`
	// Container is the id of the container used to commit
	Container string `json:"container,omitempty"`
	// ContainerConfig is the configuration of the container that is committed into the image
	ContainerConfig container.Config `json:"container_config,omitempty"`
	// DockerVersion specifies version on which image is built
	DockerVersion string `json:"docker_version,omitempty"`
	// Author of the image
	Author string `json:"author,omitempty"`
	// Config is the configuration of the container received from the client
	Config *container.Config `json:"config,omitempty"`
	// Architecture is the hardware that the image is build and runs on
	Architecture string `json:"architecture,omitempty"`
	// OS is the operating system used to build and run the image
	OS string `json:"os,omitempty"`
	// Size is the total size of the image including all layers it is composed of
	Size int64 `json:",omitempty"`
}

// Image stores the image configuration. It embeds the legacy V1Image
// fields and adds the content-addressable fields (rootfs, history).
type Image struct {
	V1Image
	// Parent is the content-addressable ID of the parent image (shadows
	// the legacy string Parent in V1Image for JSON purposes).
	Parent ID `json:"parent,omitempty"`
	// RootFS describes the layer DiffIDs composing the image filesystem.
	RootFS *RootFS `json:"rootfs,omitempty"`
	// History lists the build steps that produced the image.
	History []History `json:"history,omitempty"`
	// OSVersion and OSFeatures carry platform details (used on Windows).
	OSVersion  string   `json:"os.version,omitempty"`
	OSFeatures []string `json:"os.features,omitempty"`

	// rawJSON caches the immutable JSON associated with this image.
	rawJSON []byte

	// computedID is the ID computed from the hash of the image config.
	// Not to be confused with the legacy V1 ID in V1Image.
	computedID ID
}
|
|
||||||
|
|
||||||
// RawJSON returns the immutable JSON associated with the image.
func (img *Image) RawJSON() []byte {
	return img.rawJSON
}

// ID returns the image's content-addressable ID (computedID, not the
// legacy V1 string ID).
func (img *Image) ID() ID {
	return img.computedID
}

// ImageID stringifies ID.
func (img *Image) ImageID() string {
	return img.ID().String()
}

// RunConfig returns the image's container config.
func (img *Image) RunConfig() *container.Config {
	return img.Config
}
|
|
||||||
|
|
||||||
// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
// that JSON that's been manipulated by a push/pull cycle with a legacy
// registry won't end up with a different key order.
func (img *Image) MarshalJSON() ([]byte, error) {
	// MarshalImage drops the MarshalJSON method so the first pass below
	// does not recurse into this function.
	type MarshalImage Image

	pass1, err := json.Marshal(MarshalImage(*img))
	if err != nil {
		return nil, err
	}

	// Round-trip through a map: encoding/json emits map keys sorted,
	// which canonicalizes the top-level key order.
	var c map[string]*json.RawMessage
	if err := json.Unmarshal(pass1, &c); err != nil {
		return nil, err
	}
	return json.Marshal(c)
}
|
|
||||||
|
|
||||||
// History stores build commands that were used to create an image
type History struct {
	// Created timestamp for build point
	Created time.Time `json:"created"`
	// Author of the build point
	Author string `json:"author,omitempty"`
	// CreatedBy keeps the Dockerfile command used while building image.
	CreatedBy string `json:"created_by,omitempty"`
	// Comment is custom message set by the user when creating the image.
	Comment string `json:"comment,omitempty"`
	// EmptyLayer is set to true if this history item did not generate a
	// layer. Otherwise, the history item is associated with the next
	// layer in the RootFS section.
	EmptyLayer bool `json:"empty_layer,omitempty"`
}

// Exporter provides interface for exporting and importing images
type Exporter interface {
	// Load imports images from the reader, writing progress to the writer.
	Load(io.ReadCloser, io.Writer, bool) error
	// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
	// Save exports the named images to the writer.
	Save([]string, io.Writer) error
}
|
|
||||||
|
|
||||||
// NewFromJSON creates an Image configuration from json. The raw input is
// cached on the image (see RawJSON) so the bytes stay immutable. Images
// without a RootFS key are rejected.
func NewFromJSON(src []byte) (*Image, error) {
	img := &Image{}

	if err := json.Unmarshal(src, img); err != nil {
		return nil, err
	}
	if img.RootFS == nil {
		return nil, errors.New("Invalid image JSON, no RootFS key.")
	}

	img.rawJSON = src

	return img, nil
}
|
|
44
vendor/github.com/docker/docker/image/rootfs.go
generated
vendored
44
vendor/github.com/docker/docker/image/rootfs.go
generated
vendored
|
@ -1,44 +0,0 @@
|
||||||
package image
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/docker/docker/layer"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TypeLayers is used for RootFS.Type for filesystems organized into layers.
const TypeLayers = "layers"

// typeLayersWithBase is an older format used by Windows up to v1.12. We
// explicitly handle this as an error case to ensure that a daemon which still
// has an older image like this on disk can still start, even though the
// image itself is not usable. See https://github.com/docker/docker/pull/25806.
const typeLayersWithBase = "layers+base"

// RootFS describes images root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.
type RootFS struct {
	Type    string         `json:"type"`               // currently always TypeLayers for usable images
	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` // layer diff IDs, bottom-most first
}
|
|
||||||
|
|
||||||
// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
	return &RootFS{Type: TypeLayers}
}

// Append appends a new diffID to rootfs
func (r *RootFS) Append(id layer.DiffID) {
	r.DiffIDs = append(r.DiffIDs, id)
}

// ChainID returns the ChainID for the top layer in RootFS.
// The legacy Windows "layers+base" type is rejected with an empty ChainID
// so daemons with such images on disk can still start (see typeLayersWithBase).
func (r *RootFS) ChainID() layer.ChainID {
	if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
		logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
		return ""
	}
	return layer.CreateChainID(r.DiffIDs)
}
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue