From 044d87d96d6ba0bc7602566278e7b92d3b09dce2 Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Tue, 9 Jan 2018 21:46:04 +0100 Subject: [PATCH 01/11] Switch to golang/dep. --- CONTRIBUTING.md | 22 +- Gopkg.lock | 1390 ++++ Gopkg.toml | 191 + Makefile | 5 +- build.Dockerfile | 16 +- glide.lock | 860 --- glide.yaml | 240 - script/glide.sh | 111 - script/prune-dep.sh | 34 + script/validate-glide | 13 - script/validate-vendor | 17 +- vendor/github.com/BurntSushi/toml/COPYING | 27 +- vendor/github.com/BurntSushi/toml/lex.go | 2 +- .../aws/aws-sdk-go/service/generate.go | 5 + vendor/github.com/boltdb/bolt/LICENSE | 20 - vendor/github.com/boltdb/bolt/bolt_386.go | 10 - vendor/github.com/boltdb/bolt/bolt_amd64.go | 10 - vendor/github.com/boltdb/bolt/bolt_arm.go | 28 - vendor/github.com/boltdb/bolt/bolt_arm64.go | 12 - vendor/github.com/boltdb/bolt/bolt_linux.go | 10 - vendor/github.com/boltdb/bolt/bolt_openbsd.go | 27 - vendor/github.com/boltdb/bolt/bolt_ppc.go | 9 - vendor/github.com/boltdb/bolt/bolt_ppc64.go | 12 - vendor/github.com/boltdb/bolt/bolt_ppc64le.go | 12 - vendor/github.com/boltdb/bolt/bolt_s390x.go | 12 - vendor/github.com/boltdb/bolt/bolt_unix.go | 89 - .../boltdb/bolt/bolt_unix_solaris.go | 90 - vendor/github.com/boltdb/bolt/bolt_windows.go | 144 - .../github.com/boltdb/bolt/boltsync_unix.go | 8 - vendor/github.com/boltdb/bolt/bucket.go | 777 --- vendor/github.com/boltdb/bolt/cursor.go | 400 -- vendor/github.com/boltdb/bolt/db.go | 1039 --- vendor/github.com/boltdb/bolt/doc.go | 44 - vendor/github.com/boltdb/bolt/errors.go | 71 - vendor/github.com/boltdb/bolt/freelist.go | 252 - vendor/github.com/boltdb/bolt/node.go | 604 -- vendor/github.com/boltdb/bolt/page.go | 197 - vendor/github.com/boltdb/bolt/tx.go | 684 -- vendor/github.com/cenk/backoff/backoff.go | 2 +- vendor/github.com/cenk/backoff/exponential.go | 11 +- vendor/github.com/cenk/backoff/ticker.go | 11 +- vendor/github.com/cenk/backoff/tries.go | 35 + .../github.com/codahale/hdrhistogram/hdr.go | 83 +- vendor/github.com/coreos/bbolt/freelist.go | 9 +- vendor/github.com/coreos/etcd/auth/doc.go | 16 + vendor/github.com/coreos/etcd/auth/jwt.go | 137 + .../coreos/etcd/auth/range_perm_cache.go | 133 + .../coreos/etcd/auth/simple_token.go | 220 + vendor/github.com/coreos/etcd/auth/store.go | 1059 +++ .../coreos/etcd/etcdserver/api/capability.go | 86 + .../coreos/etcd/etcdserver/api/cluster.go | 41 + .../coreos/etcd/etcdserver/api/doc.go | 16 + .../coreos/etcd/etcdserver/api/v3rpc/auth.go | 157 + .../coreos/etcd/etcdserver/api/v3rpc/codec.go | 34 + .../coreos/etcd/etcdserver/api/v3rpc/grpc.go | 53 + .../etcd/etcdserver/api/v3rpc/header.go | 46 + .../etcd/etcdserver/api/v3rpc/interceptor.go | 144 + .../coreos/etcd/etcdserver/api/v3rpc/key.go | 259 + .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 123 + .../etcd/etcdserver/api/v3rpc/maintenance.go | 190 + .../etcd/etcdserver/api/v3rpc/member.go | 103 + .../etcd/etcdserver/api/v3rpc/metrics.go | 38 + .../coreos/etcd/etcdserver/api/v3rpc/quota.go | 89 + .../coreos/etcd/etcdserver/api/v3rpc/util.go | 103 + .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 426 ++ .../coreos/etcd/etcdserver/apply.go | 878 +++ .../coreos/etcd/etcdserver/apply_auth.go | 196 + .../coreos/etcd/etcdserver/apply_v2.go | 140 + .../coreos/etcd/etcdserver/backend.go | 81 + .../coreos/etcd/etcdserver/cluster_util.go | 258 + .../coreos/etcd/etcdserver/config.go | 204 + .../etcd/etcdserver/consistent_index.go | 33 + .../github.com/coreos/etcd/etcdserver/doc.go | 16 + .../coreos/etcd/etcdserver/errors.go | 46 + 
.../coreos/etcd/etcdserver/metrics.go | 102 + .../coreos/etcd/etcdserver/quota.go | 121 + .../github.com/coreos/etcd/etcdserver/raft.go | 594 ++ .../coreos/etcd/etcdserver/server.go | 1659 +++++ .../coreos/etcd/etcdserver/snapshot_merge.go | 73 + .../coreos/etcd/etcdserver/storage.go | 98 + .../github.com/coreos/etcd/etcdserver/util.go | 97 + .../coreos/etcd/etcdserver/v2_server.go | 125 + .../coreos/etcd/etcdserver/v3_server.go | 692 ++ vendor/github.com/coreos/etcd/mvcc/doc.go | 16 + vendor/github.com/coreos/etcd/mvcc/index.go | 219 + .../github.com/coreos/etcd/mvcc/key_index.go | 332 + vendor/github.com/coreos/etcd/mvcc/kv.go | 147 + vendor/github.com/coreos/etcd/mvcc/kv_view.go | 53 + vendor/github.com/coreos/etcd/mvcc/kvstore.go | 459 ++ .../coreos/etcd/mvcc/kvstore_compaction.go | 66 + .../coreos/etcd/mvcc/kvstore_txn.go | 253 + vendor/github.com/coreos/etcd/mvcc/metrics.go | 174 + .../coreos/etcd/mvcc/metrics_txn.go | 67 + .../github.com/coreos/etcd/mvcc/revision.go | 67 + vendor/github.com/coreos/etcd/mvcc/util.go | 56 + .../coreos/etcd/mvcc/watchable_store.go | 522 ++ .../coreos/etcd/mvcc/watchable_store_txn.go | 53 + vendor/github.com/coreos/etcd/mvcc/watcher.go | 171 + .../coreos/etcd/mvcc/watcher_group.go | 283 + vendor/github.com/docker/cli/cli/cobra.go | 150 + .../github.com/docker/cli/cli/command/cli.go | 305 + .../docker/cli/cli/command/events_utils.go | 47 + .../docker/cli/cli/command/image/build.go | 500 ++ .../docker/cli/cli/command/image/cmd.go | 34 + .../docker/cli/cli/command/image/history.go | 64 + .../docker/cli/cli/command/image/import.go | 87 + .../docker/cli/cli/command/image/inspect.go | 44 + .../docker/cli/cli/command/image/list.go | 95 + .../docker/cli/cli/command/image/load.go | 77 + .../docker/cli/cli/command/image/prune.go | 95 + .../docker/cli/cli/command/image/pull.go | 85 + .../docker/cli/cli/command/image/push.go | 61 + .../docker/cli/cli/command/image/remove.go | 78 + .../docker/cli/cli/command/image/save.go | 56 + .../docker/cli/cli/command/image/tag.go | 41 + .../docker/cli/cli/command/image/trust.go | 384 ++ .../github.com/docker/cli/cli/command/in.go | 56 + .../github.com/docker/cli/cli/command/out.go | 50 + .../docker/cli/cli/command/registry.go | 189 + .../docker/cli/cli/command/stream.go | 34 + .../docker/cli/cli/command/trust.go | 43 + .../docker/cli/cli/command/utils.go | 119 + vendor/github.com/docker/cli/cli/error.go | 33 + vendor/github.com/docker/cli/cli/required.go | 96 + vendor/github.com/docker/cli/cli/version.go | 9 + .../docker/distribution/registry/doc.go | 2 + .../docker/distribution/registry/registry.go | 356 + .../docker/distribution/registry/root.go | 84 + .../registry/storage/blobcachemetrics.go | 60 + .../registry/storage/blobserver.go | 78 + .../registry/storage/blobstore.go | 223 + .../registry/storage/blobwriter.go | 400 ++ .../storage/blobwriter_nonresumable.go | 17 + .../registry/storage/blobwriter_resumable.go | 145 + .../distribution/registry/storage/catalog.go | 153 + .../distribution/registry/storage/doc.go | 3 + .../registry/storage/filereader.go | 177 + .../registry/storage/garbagecollect.go | 114 + .../registry/storage/linkedblobstore.go | 470 ++ .../registry/storage/manifestlisthandler.go | 92 + .../registry/storage/manifeststore.go | 141 + .../distribution/registry/storage/paths.go | 490 ++ .../registry/storage/purgeuploads.go | 139 + .../distribution/registry/storage/registry.go | 306 + .../storage/schema2manifesthandler.go | 136 + .../registry/storage/signedmanifesthandler.go | 141 + 
.../distribution/registry/storage/tagstore.go | 191 + .../distribution/registry/storage/util.go | 21 + .../distribution/registry/storage/vacuum.go | 67 + .../distribution/registry/storage/walk.go | 59 + .../docker/docker/builder/builder.go | 99 + vendor/github.com/docker/docker/cli/cli.go | 25 + vendor/github.com/docker/docker/cli/cobra.go | 150 + vendor/github.com/docker/docker/cli/error.go | 33 + .../github.com/docker/docker/cli/required.go | 96 + .../docker/docker/runconfig/config.go | 108 + .../docker/docker/runconfig/config_unix.go | 59 + .../docker/docker/runconfig/config_windows.go | 19 + .../docker/docker/runconfig/errors.go | 38 + .../docker/docker/runconfig/hostconfig.go | 80 + .../docker/runconfig/hostconfig_solaris.go | 46 + .../docker/runconfig/hostconfig_unix.go | 110 + .../docker/runconfig/hostconfig_windows.go | 96 + .../golang/protobuf/jsonpb/jsonpb.go | 829 --- .../github.com/golang/protobuf/ptypes/any.go | 136 + .../github.com/golang/protobuf/ptypes/doc.go | 35 + .../golang/protobuf/ptypes/duration.go | 102 + .../golang/protobuf/ptypes/timestamp.go | 125 + .../influxdata/influxdb/client/influxdb.go | 832 +++ .../mesos-dns/records/state/state.go | 6 +- .../mitchellh/mapstructure/mapstructure.go | 22 +- .../runc/libcontainer/capabilities_linux.go | 114 + .../runc/libcontainer/compat_1.5_linux.go | 10 + .../runc/libcontainer/console.go | 17 + .../runc/libcontainer/console_freebsd.go | 13 + .../runc/libcontainer/console_linux.go | 157 + .../runc/libcontainer/console_solaris.go | 11 + .../runc/libcontainer/console_windows.go | 30 + .../runc/libcontainer/container.go | 166 + .../runc/libcontainer/container_linux.go | 1579 +++++ .../runc/libcontainer/container_solaris.go | 20 + .../runc/libcontainer/container_windows.go | 20 + .../runc/libcontainer/criu_opts_unix.go | 39 + .../runc/libcontainer/criu_opts_windows.go | 6 + .../opencontainers/runc/libcontainer/error.go | 70 + .../runc/libcontainer/factory.go | 44 + .../runc/libcontainer/factory_linux.go | 337 + .../runc/libcontainer/generic_error.go | 92 + .../runc/libcontainer/init_linux.go | 500 ++ .../runc/libcontainer/message_linux.go | 91 + .../runc/libcontainer/network_linux.go | 259 + .../runc/libcontainer/notify_linux.go | 89 + .../runc/libcontainer/process.go | 106 + .../runc/libcontainer/process_linux.go | 483 ++ .../runc/libcontainer/restored_process.go | 122 + .../runc/libcontainer/rootfs_linux.go | 812 +++ .../runc/libcontainer/setgroups_linux.go | 11 + .../runc/libcontainer/setns_init_linux.go | 63 + .../runc/libcontainer/standard_init_linux.go | 190 + .../runc/libcontainer/state_linux.go | 247 + .../opencontainers/runc/libcontainer/stats.go | 15 + .../runc/libcontainer/stats_freebsd.go | 5 + .../runc/libcontainer/stats_linux.go | 8 + .../runc/libcontainer/stats_solaris.go | 7 + .../runc/libcontainer/stats_windows.go | 5 + .../opencontainers/runc/libcontainer/sync.go | 107 + .../github.com/vulcand/oxy/stream/stream.go | 92 - .../vulcand/oxy/stream/threshold.go | 225 - vendor/golang.org/x/text/internal/gen/code.go | 351 + vendor/golang.org/x/text/internal/gen/gen.go | 281 + .../x/text/internal/triegen/compact.go | 58 + .../x/text/internal/triegen/print.go | 251 + .../x/text/internal/triegen/triegen.go | 494 ++ vendor/golang.org/x/text/internal/ucd/ucd.go | 376 ++ vendor/golang.org/x/text/secure/doc.go | 6 + vendor/golang.org/x/text/unicode/cldr/base.go | 100 + vendor/golang.org/x/text/unicode/cldr/cldr.go | 130 + .../golang.org/x/text/unicode/cldr/collate.go | 359 ++ .../golang.org/x/text/unicode/cldr/decode.go | 171 
+ .../golang.org/x/text/unicode/cldr/makexml.go | 400 ++ .../golang.org/x/text/unicode/cldr/resolve.go | 602 ++ .../golang.org/x/text/unicode/cldr/slice.go | 144 + vendor/golang.org/x/text/unicode/cldr/xml.go | 1456 +++++ vendor/golang.org/x/text/unicode/doc.go | 8 + .../x/text/unicode/rangetable/gen.go | 113 + .../x/text/unicode/rangetable/merge.go | 260 + .../x/text/unicode/rangetable/rangetable.go | 70 + .../x/text/unicode/rangetable/tables.go | 5735 +++++++++++++++++ .../grpc/grpclb/grpclb_server_generated.go | 72 + .../gopkg.in/ns1/ns1-go.v2/rest/model/doc.go | 2 + .../gopkg.in/ns1/ns1-go.v2/rest/model/stat.go | 15 + .../client-go/tools/clientcmd/auth_loaders.go | 106 + .../tools/clientcmd/client_config.go | 501 ++ .../client-go/tools/clientcmd/config.go | 472 ++ .../k8s.io/client-go/tools/clientcmd/doc.go | 37 + .../client-go/tools/clientcmd/loader.go | 609 ++ .../tools/clientcmd/merged_client_builder.go | 154 + .../client-go/tools/clientcmd/overrides.go | 206 + .../client-go/tools/clientcmd/validation.go | 270 + 239 files changed, 42372 insertions(+), 7011 deletions(-) create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml delete mode 100644 glide.lock delete mode 100644 glide.yaml delete mode 100755 script/glide.sh create mode 100755 script/prune-dep.sh delete mode 100755 script/validate-glide create mode 100644 vendor/github.com/aws/aws-sdk-go/service/generate.go delete mode 100644 vendor/github.com/boltdb/bolt/LICENSE delete mode 100644 vendor/github.com/boltdb/bolt/bolt_386.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_amd64.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_arm.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_arm64.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_linux.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_openbsd.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64le.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_s390x.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_unix.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_unix_solaris.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_windows.go delete mode 100644 vendor/github.com/boltdb/bolt/boltsync_unix.go delete mode 100644 vendor/github.com/boltdb/bolt/bucket.go delete mode 100644 vendor/github.com/boltdb/bolt/cursor.go delete mode 100644 vendor/github.com/boltdb/bolt/db.go delete mode 100644 vendor/github.com/boltdb/bolt/doc.go delete mode 100644 vendor/github.com/boltdb/bolt/errors.go delete mode 100644 vendor/github.com/boltdb/bolt/freelist.go delete mode 100644 vendor/github.com/boltdb/bolt/node.go delete mode 100644 vendor/github.com/boltdb/bolt/page.go delete mode 100644 vendor/github.com/boltdb/bolt/tx.go create mode 100644 vendor/github.com/cenk/backoff/tries.go create mode 100644 vendor/github.com/coreos/etcd/auth/doc.go create mode 100644 vendor/github.com/coreos/etcd/auth/jwt.go create mode 100644 vendor/github.com/coreos/etcd/auth/range_perm_cache.go create mode 100644 vendor/github.com/coreos/etcd/auth/simple_token.go create mode 100644 vendor/github.com/coreos/etcd/auth/store.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/capability.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/cluster.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/doc.go create mode 100644 
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply_auth.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply_v2.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/backend.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/cluster_util.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/config.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/consistent_index.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/doc.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/errors.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/metrics.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/quota.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/raft.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/server.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/storage.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/util.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/v2_server.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/v3_server.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/doc.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/index.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/key_index.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kv.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kv_view.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/metrics.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/metrics_txn.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/revision.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/util.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/watchable_store.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/watcher.go create mode 100644 vendor/github.com/coreos/etcd/mvcc/watcher_group.go create mode 100644 vendor/github.com/docker/cli/cli/cobra.go create mode 100644 vendor/github.com/docker/cli/cli/command/cli.go create mode 100644 
vendor/github.com/docker/cli/cli/command/events_utils.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/build.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/cmd.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/history.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/import.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/inspect.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/list.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/load.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/prune.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/pull.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/push.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/remove.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/save.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/tag.go create mode 100644 vendor/github.com/docker/cli/cli/command/image/trust.go create mode 100644 vendor/github.com/docker/cli/cli/command/in.go create mode 100644 vendor/github.com/docker/cli/cli/command/out.go create mode 100644 vendor/github.com/docker/cli/cli/command/registry.go create mode 100644 vendor/github.com/docker/cli/cli/command/stream.go create mode 100644 vendor/github.com/docker/cli/cli/command/trust.go create mode 100644 vendor/github.com/docker/cli/cli/command/utils.go create mode 100644 vendor/github.com/docker/cli/cli/error.go create mode 100644 vendor/github.com/docker/cli/cli/required.go create mode 100644 vendor/github.com/docker/cli/cli/version.go create mode 100644 vendor/github.com/docker/distribution/registry/doc.go create mode 100644 vendor/github.com/docker/distribution/registry/registry.go create mode 100644 vendor/github.com/docker/distribution/registry/root.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobserver.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobstore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobwriter.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/catalog.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/doc.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/filereader.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/garbagecollect.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/manifeststore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/paths.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/purgeuploads.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/registry.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go create mode 100644 
vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/tagstore.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/util.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/vacuum.go create mode 100644 vendor/github.com/docker/distribution/registry/storage/walk.go create mode 100644 vendor/github.com/docker/docker/builder/builder.go create mode 100644 vendor/github.com/docker/docker/cli/cli.go create mode 100644 vendor/github.com/docker/docker/cli/cobra.go create mode 100644 vendor/github.com/docker/docker/cli/error.go create mode 100644 vendor/github.com/docker/docker/cli/required.go create mode 100644 vendor/github.com/docker/docker/runconfig/config.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_windows.go create mode 100644 vendor/github.com/docker/docker/runconfig/errors.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_windows.go delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/influxdata/influxdb/client/influxdb.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/generic_error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/message_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/network_linux.go create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/process.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/process_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/restored_process.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/state_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/sync.go delete mode 100644 vendor/github.com/vulcand/oxy/stream/stream.go delete mode 100644 vendor/github.com/vulcand/oxy/stream/threshold.go create mode 100644 vendor/golang.org/x/text/internal/gen/code.go create mode 100644 vendor/golang.org/x/text/internal/gen/gen.go create mode 100644 vendor/golang.org/x/text/internal/triegen/compact.go create mode 100644 vendor/golang.org/x/text/internal/triegen/print.go create mode 100644 vendor/golang.org/x/text/internal/triegen/triegen.go create mode 100644 vendor/golang.org/x/text/internal/ucd/ucd.go create mode 100644 vendor/golang.org/x/text/secure/doc.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go create mode 100644 vendor/golang.org/x/text/unicode/doc.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/gen.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/merge.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/rangetable.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go create mode 100644 vendor/gopkg.in/ns1/ns1-go.v2/rest/model/doc.go create mode 100644 vendor/gopkg.in/ns1/ns1-go.v2/rest/model/stat.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/client_config.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/config.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/doc.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/loader.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/overrides.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/validation.go 
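The file summary above shows the substance of the switch: `glide.yaml`, `glide.lock`, `script/glide.sh`, and `script/validate-glide` are removed, while `Gopkg.toml`, `Gopkg.lock`, and `script/prune-dep.sh` are added and the vendor tree is re-populated accordingly. The sketch below is one plausible way such a migration is performed with dep; it is an illustrative assumption (dep's `init` command can import existing glide manifests), not a record of how this commit was actually produced.

```bash
# Illustrative one-time migration from glide to dep (hedged assumption:
# dep init imports glide.yaml/glide.lock when they are present).
go get -u github.com/golang/dep/cmd/dep   # same tool installed in build.Dockerfile
dep init -v                               # writes Gopkg.toml and Gopkg.lock, fills vendor/
git rm glide.yaml glide.lock script/glide.sh script/validate-glide
git add Gopkg.toml Gopkg.lock vendor/
```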
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 463f93338..0146fa0e7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,7 +2,8 @@
 
 ## Building
 
-You need either [Docker](https://github.com/docker/docker) and `make` (Method 1), or `go` (Method 2) in order to build Traefik. For changes to its dependencies, the `glide` dependency management tool and `glide-vc` plugin are required.
+You need either [Docker](https://github.com/docker/docker) and `make` (Method 1), or `go` (Method 2) in order to build Traefik.
+For changes to its dependencies, the `dep` dependency management tool is required.
 
 ### Method 1: Using `Docker` and `Makefile`
 
@@ -14,7 +15,7 @@ docker build -t "traefik-dev:no-more-godep-ever" -f build.Dockerfile .
 Sending build context to Docker daemon 295.3 MB
 Step 0 : FROM golang:1.9-alpine
  ---> 8c6473912976
-Step 1 : RUN go get github.com/Masterminds/glide
+Step 1 : RUN go get github.com/golang/dep/cmd/dep
 [...]
 docker run --rm -v "/var/run/docker.sock:/var/run/docker.sock" -it -e OS_ARCH_ARG -e OS_PLATFORM_ARG -e TESTFLAGS -v "/home/user/go/src/github.com/containous/traefik/"dist":/go/src/github.com/containous/traefik/"dist"" "traefik-dev:no-more-godep-ever" ./script/make.sh generate binary
 ---> Making bundle: generate (in .)
@@ -82,21 +83,20 @@ You will find the Træfik executable in the `~/go/src/github.com/containous/trae
 
 If you happen to update the provider templates (in `/templates`), you need to run `go generate` to update the `autogen` package.
 
-### Setting up `glide` and `glide-vc` for dependency management
+### Setting up dependency management
 
-- Glide is not required for building; however, it is necessary to modify dependencies (i.e., add, update, or remove third-party packages)
-- Glide can be installed either via homebrew: `$ brew install glide` or via the official glide script: `$ curl https://glide.sh/get | sh`
-- The glide plugin `glide-vc` must be installed from source: `go get github.com/sgotti/glide-vc`
+[dep](https://github.com/golang/dep) is not required for building; however, it is necessary in order to modify dependencies (i.e., add, update, or remove third-party packages).
 
-If you want to add a dependency, use `$ glide get` to have glide put it into the vendor folder and update the glide manifest/lock files (`glide.yaml` and `glide.lock`, respectively). A following `glide-vc` run should be triggered to trim down the size of the vendor folder. The final result must be committed into VCS.
+If you want to add a dependency, use `dep ensure -add` to have [dep](https://github.com/golang/dep) put it into the vendor folder and update the dep manifest/lock files (`Gopkg.toml` and `Gopkg.lock`, respectively).
 
-Care must be taken to choose the right arguments to `glide` when dealing with dependencies, or otherwise risk ending up with a broken build. For that reason, the helper script `script/glide.sh` encapsulates the gory details and conveniently calls `glide-vc` as well. Call it without parameters for basic usage instructions.
+A `make prune-dep` run should follow to trim down the size of the vendor folder.
+The final result must be committed into VCS.
 
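The paragraph above also covers updating an existing dependency. A minimal sketch of that flow, assuming the `make prune-dep` target added by this patch and a dep release that supports `dep ensure -update` (the `github.com/foo/bar` path is a placeholder, as in the example that follows in the diff):

```bash
# Minimal sketch: update an existing dependency, then trim and commit vendor/.
dep ensure -update github.com/foo/bar   # re-resolve within Gopkg.toml constraints
make prune-dep                          # trim the vendor folder (script/prune-dep.sh)
git add Gopkg.toml Gopkg.lock vendor/   # the final result must be committed into VCS
```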
-Here's a full example using glide to add a new dependency: +Here's a full example using dep to add a new dependency: ```bash # install the new main dependency github.com/foo/bar and minimize vendor size -$ ./script/glide.sh get github.com/foo/bar +$ dep ensure -add github.com/foo/bar # generate (Only required to integrate other components such as web dashboard) $ go generate # Standard go build @@ -127,6 +127,7 @@ Test success ``` For development purposes, you can specify which tests to run by using: + ```bash # Run every tests in the MyTest suite TESTFLAGS="-check.f MyTestSuite" make test-integration @@ -146,6 +147,7 @@ More: https://labix.org/gocheck #### Method 2: `go` Unit tests can be run from the cloned directory by `$ go test ./...` which should return `ok` similar to: + ``` ok _/home/user/go/src/github/containous/traefik 0.004s ``` diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..2ffd74373 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,1390 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "cloud.google.com/go" + packages = [ + "compute/metadata", + "internal" + ] + revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c" + version = "v0.7.0" + +[[projects]] + branch = "master" + name = "github.com/ArthurHlt/go-eureka-client" + packages = ["eureka"] + revision = "9d0a49cbd39aa3634ae1977e9f519a262b10adaf" + +[[projects]] + branch = "master" + name = "github.com/ArthurHlt/gominlog" + packages = ["."] + revision = "72eebf980f467d3ab3a8b4ddf660f664911ce519" + +[[projects]] + name = "github.com/Azure/azure-sdk-for-go" + packages = ["arm/dns"] + revision = "f7bb4db3ea4c73dc58bd284c38ea644a79324be0" + +[[projects]] + branch = "master" + name = "github.com/Azure/go-ansiterm" + packages = [ + ".", + "winterm" + ] + revision = "d6e3b3328b783f23731bc4d058875b0371ff8109" + +[[projects]] + name = "github.com/Azure/go-autorest" + packages = [ + "autorest", + "autorest/adal", + "autorest/azure", + "autorest/date", + "autorest/to" + ] + revision = "f6be1abbb5abd0517522f850dd785990d373da7e" + version = "v9.0.0" + +[[projects]] + branch = "master" + name = "github.com/BurntSushi/toml" + packages = ["."] + revision = "a368813c5e648fee92e5f6c30e3944ff9d5e8895" + +[[projects]] + branch = "master" + name = "github.com/BurntSushi/ty" + packages = [ + ".", + "fun" + ] + revision = "6add9cd6ad42d389d6ead1dde60b4ad71e46fd74" + +[[projects]] + name = "github.com/JamesClonk/vultr" + packages = ["lib"] + revision = "2fd0705ce648e602e6c9c57329a174270a4f6688" + +[[projects]] + name = "github.com/Masterminds/semver" + packages = ["."] + revision = "59c29afe1a994eacb71c833025ca7acf874bb1da" + version = "v1.2.2" + +[[projects]] + name = "github.com/Masterminds/sprig" + packages = ["."] + revision = "e039e20e500c2c025d9145be375e27cf42a94174" + +[[projects]] + name = "github.com/Microsoft/go-winio" + packages = ["."] + revision = "f533f7a102197536779ea3a8cb881d639e21ec5a" + version = "v0.4.2" + +[[projects]] + branch = "master" + name = "github.com/NYTimes/gziphandler" + packages = ["."] + revision = "47ca22a0aeea4c9ceddfb935d818d636d934c312" + +[[projects]] + name = "github.com/Nvveen/Gotty" + packages = ["."] + revision = "6018b68f96b839edfbe3fb48668853f5dbad88a3" + source = "github.com/ijc25/Gotty" + +[[projects]] + name = "github.com/PuerkitoBio/purell" + packages = ["."] + revision = "8a290539e2e8629dbc4e6bad948158f790ec31f4" + version = "v1.0.0" + +[[projects]] + name = "github.com/PuerkitoBio/urlesc" + packages = ["."] + 
revision = "5bd2802263f21d8788851d5305584c82a5c75d7e" + +[[projects]] + name = "github.com/Sirupsen/logrus" + packages = ["."] + revision = "10f801ebc38b33738c9d17d50860f484a0988ff5" + +[[projects]] + name = "github.com/VividCortex/gohistogram" + packages = ["."] + revision = "51564d9861991fb0ad0f531c99ef602d0f9866e6" + version = "v1.0.0" + +[[projects]] + name = "github.com/abbot/go-http-auth" + packages = ["."] + revision = "0ddd408d5d60ea76e320503cc7dd091992dee608" + version = "v0.4.0" + +[[projects]] + name = "github.com/aokoli/goutils" + packages = ["."] + revision = "3391d3790d23d03408670993e957e8f408993c34" + version = "v1.0.1" + +[[projects]] + branch = "master" + name = "github.com/armon/go-proxyproto" + packages = ["."] + revision = "48572f11356f1843b694f21a290d4f1006bc5e47" + +[[projects]] + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/stscreds", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "private/protocol", + "private/protocol/ec2query", + "private/protocol/json/jsonutil", + "private/protocol/jsonrpc", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/restxml", + "private/protocol/xml/xmlutil", + "private/waiter", + "service/dynamodb", + "service/dynamodb/dynamodbattribute", + "service/dynamodb/dynamodbiface", + "service/ec2", + "service/ecs", + "service/route53", + "service/sts" + ] + revision = "3f8f870ec9939e32b3372abf74d24e468bcd285d" + version = "v1.6.18" + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + +[[projects]] + name = "github.com/blang/semver" + packages = ["."] + revision = "31b736133b98f26d5e078ec9eb591666edfd091f" + version = "v3.0.1" + +[[projects]] + branch = "master" + name = "github.com/cenk/backoff" + packages = ["."] + revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" + +[[projects]] + branch = "master" + name = "github.com/codahale/hdrhistogram" + packages = ["."] + revision = "3a0bb77429bd3a61596f5e8a3172445844342120" + +[[projects]] + name = "github.com/codegangsta/cli" + packages = ["."] + revision = "bf4a526f48af7badd25d2cb02d587e1b01be3b50" + version = "v1.4.1" + +[[projects]] + name = "github.com/containous/flaeg" + packages = ["."] + revision = "60c87a513a955ca7225e1b1c772581cea8420cb4" + version = "v1.0.1" + +[[projects]] + branch = "master" + name = "github.com/containous/mux" + packages = ["."] + revision = "06ccd3e75091eb659b1d720cda0e16bc7057954c" + +[[projects]] + name = "github.com/containous/staert" + packages = ["."] + revision = "af517d5b70db9c4b0505e0144fcc62b054057d2a" + version = "v2.0.0" + +[[projects]] + name = "github.com/containous/traefik-extra-service-fabric" + packages = ["."] + revision = "ca1fb57108293caad285b1c366b763f6c6ab71c9" + version = "v1.0.5" + +[[projects]] + name = "github.com/coreos/bbolt" + packages = ["."] + revision = "32c383e75ce054674c53b5a07e55de85332aee14" + +[[projects]] + name = "github.com/coreos/etcd" + packages = [ + "auth/authpb", + "client", + "clientv3", + "clientv3/concurrency", + "etcdserver/api/v3rpc/rpctypes", + "etcdserver/etcdserverpb", + "mvcc/mvccpb", + "pkg/pathutil", + "pkg/srv", + "pkg/types", + "version" + ] + revision = 
"f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b" + version = "v3.2.9" + +[[projects]] + name = "github.com/coreos/go-oidc" + packages = [ + "http", + "jose", + "key", + "oauth2", + "oidc" + ] + revision = "5644a2f50e2d2d5ba0b474bc5bc55fea1925936d" + +[[projects]] + name = "github.com/coreos/go-semver" + packages = ["semver"] + revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6" + version = "v0.2.0" + +[[projects]] + name = "github.com/coreos/go-systemd" + packages = ["daemon"] + revision = "48702e0da86bd25e76cfef347e2adeb434a0d0a6" + version = "v14" + +[[projects]] + name = "github.com/coreos/pkg" + packages = [ + "health", + "httputil", + "timeutil" + ] + revision = "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "04cdfd42973bb9c8589fd6a731800cf222fde1a9" + +[[projects]] + branch = "master" + name = "github.com/decker502/dnspod-go" + packages = ["."] + revision = "f33a2c6040fc2550a631de7b3a53bddccdcd73fb" + +[[projects]] + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" + version = "v3.0.0" + +[[projects]] + name = "github.com/dnsimple/dnsimple-go" + packages = ["dnsimple"] + revision = "f2d9b723cc9547d182e24ac2e527ae25d25fc93f" + +[[projects]] + name = "github.com/docker/cli" + packages = [ + "cli/command/image/build", + "cli/config", + "cli/config/configfile" + ] + revision = "d95fd2f38cfc23e077530c6181330727d561b6a0" + +[[projects]] + name = "github.com/docker/distribution" + packages = [ + ".", + "context", + "digestset", + "reference", + "registry/api/errcode", + "registry/api/v2", + "registry/client", + "registry/client/auth", + "registry/client/auth/challenge", + "registry/client/transport", + "registry/storage/cache", + "registry/storage/cache/memory", + "uuid" + ] + revision = "b38e5838b7b2f2ad48e06ec4b500011976080621" + +[[projects]] + name = "github.com/docker/docker" + packages = [ + "api", + "api/types", + "api/types/blkiodev", + "api/types/container", + "api/types/events", + "api/types/filters", + "api/types/image", + "api/types/mount", + "api/types/network", + "api/types/registry", + "api/types/strslice", + "api/types/swarm", + "api/types/time", + "api/types/versions", + "api/types/volume", + "builder/dockerignore", + "client", + "opts", + "pkg/archive", + "pkg/fileutils", + "pkg/gitutils", + "pkg/homedir", + "pkg/httputils", + "pkg/idtools", + "pkg/ioutils", + "pkg/jsonlog", + "pkg/jsonmessage", + "pkg/longpath", + "pkg/mount", + "pkg/namesgenerator", + "pkg/pools", + "pkg/progress", + "pkg/promise", + "pkg/random", + "pkg/stdcopy", + "pkg/streamformatter", + "pkg/stringid", + "pkg/symlink", + "pkg/system", + "pkg/tarsum", + "pkg/term", + "pkg/term/windows", + "pkg/tlsconfig", + "pkg/urlutil", + "registry", + "runconfig/opts" + ] + revision = "75c7536d2e2e328b644bf69153de879d1d197988" + +[[projects]] + name = "github.com/docker/go-connections" + packages = [ + "nat", + "sockets", + "tlsconfig" + ] + revision = "e15c02316c12de00874640cd76311849de2aeed5" + +[[projects]] + name = "github.com/docker/go-units" + packages = ["."] + revision = "9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1" + +[[projects]] + branch = "master" + name = "github.com/docker/leadership" + packages = ["."] + revision = "af20da7d3e62be9259835e93261acf931b5adecf" + source = "github.com/containous/leadership" + +[[projects]] + name = "github.com/docker/libcompose" + packages = [ + "config", + "docker", + "docker/auth", + "docker/builder", + "docker/client", + 
"docker/container", + "docker/ctx", + "docker/image", + "docker/network", + "docker/service", + "docker/volume", + "labels", + "logger", + "lookup", + "project", + "project/events", + "project/options", + "utils", + "version", + "yaml" + ] + revision = "1b708aac26a4fc6f9bff31728a8e3a252ef57dbd" + +[[projects]] + branch = "master" + name = "github.com/docker/libkv" + packages = [ + ".", + "store", + "store/boltdb", + "store/consul", + "store/etcd/v2", + "store/etcd/v3", + "store/zookeeper" + ] + revision = "5e4bb288a9a74320bb03f5c18d6bdbab0d8049de" + source = "github.com/abronan/libkv" + +[[projects]] + name = "github.com/docker/libtrust" + packages = ["."] + revision = "9cbd2a1374f46905c68a4eb3694a130610adc62a" + +[[projects]] + name = "github.com/donovanhide/eventsource" + packages = ["."] + revision = "b8f31a59085e69dd2678cf51840db2ac625cb741" + +[[projects]] + name = "github.com/eapache/channels" + packages = ["."] + revision = "47238d5aae8c0fefd518ef2bee46290909cf8263" + version = "v1.1.0" + +[[projects]] + name = "github.com/eapache/queue" + packages = ["."] + revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" + version = "v1.1.0" + +[[projects]] + name = "github.com/edeckers/auroradnsclient" + packages = [ + ".", + "records", + "requests", + "requests/errors", + "tokens", + "zones" + ] + revision = "398f53855ba258191157e20fabfaccca5e13cea9" + +[[projects]] + branch = "master" + name = "github.com/elazarl/go-bindata-assetfs" + packages = ["."] + revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43" + +[[projects]] + name = "github.com/emicklei/go-restful" + packages = [ + ".", + "log", + "swagger" + ] + revision = "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" + +[[projects]] + name = "github.com/exoscale/egoscale" + packages = ["."] + revision = "325740036187ddae3a5b74be00fbbc70011c4d96" + +[[projects]] + name = "github.com/fatih/color" + packages = ["."] + revision = "62e9147c64a1ed519147b62a56a14e83e2be02c1" + +[[projects]] + branch = "master" + name = "github.com/flynn/go-shlex" + packages = ["."] + revision = "3f9db97f856818214da2e1057f8ad84803971cff" + +[[projects]] + name = "github.com/gambol99/go-marathon" + packages = ["."] + revision = "03b46169666c53b9cc953b875ac5714e5103e064" + +[[projects]] + name = "github.com/ghodss/yaml" + packages = ["."] + revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee" + +[[projects]] + branch = "fork-containous" + name = "github.com/go-check/check" + packages = ["."] + revision = "ca0bf163426aa183d03fd4949101785c0347f273" + source = "github.com/containous/check" + +[[projects]] + name = "github.com/go-ini/ini" + packages = ["."] + revision = "f384f410798cbe7cdce40eec40b79ed32bb4f1ad" + +[[projects]] + name = "github.com/go-kit/kit" + packages = [ + "log", + "metrics", + "metrics/dogstatsd", + "metrics/generic", + "metrics/influx", + "metrics/internal/lv", + "metrics/internal/ratemap", + "metrics/multi", + "metrics/prometheus", + "metrics/statsd", + "util/conn" + ] + revision = "f66b0e13579bfc5a48b9e2a94b1209c107ea1f41" + version = "v0.3.0" + +[[projects]] + name = "github.com/go-logfmt/logfmt" + packages = ["."] + revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" + version = "v0.3.0" + +[[projects]] + name = "github.com/go-openapi/jsonpointer" + packages = ["."] + revision = "46af16f9f7b149af66e5d1bd010e3574dc06de98" + +[[projects]] + name = "github.com/go-openapi/jsonreference" + packages = ["."] + revision = "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" + +[[projects]] + name = "github.com/go-openapi/spec" + packages = ["."] + revision = 
"6aced65f8501fe1217321abf0749d354824ba2ff" + +[[projects]] + name = "github.com/go-openapi/swag" + packages = ["."] + revision = "1d0bd113de87027671077d3c71eb3ac5d7dbba72" + +[[projects]] + name = "github.com/go-stack/stack" + packages = ["."] + revision = "54be5f394ed2c3e19dac9134a40a95ba5a017f7b" + version = "v1.5.4" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys" + ] + revision = "909568be09de550ed094403c2bf8a261b5bb730a" + version = "v0.3" + +[[projects]] + name = "github.com/golang/glog" + packages = ["."] + revision = "44145f04b68cf362d9c4df2182967c2275eaefed" + +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes/any" + ] + revision = "4bd1920723d7b7c925de087aa32e2187708897f7" + +[[projects]] + name = "github.com/google/go-github" + packages = ["github"] + revision = "fe7d11f8add400587b6718d9f39a62e42cb04c28" + +[[projects]] + branch = "master" + name = "github.com/google/go-querystring" + packages = ["query"] + revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" + +[[projects]] + name = "github.com/google/gofuzz" + packages = ["."] + revision = "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" + +[[projects]] + name = "github.com/googleapis/gax-go" + packages = ["."] + revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b" + +[[projects]] + name = "github.com/gorilla/context" + packages = ["."] + revision = "215affda49addc4c8ef7e2534915df2c8c35c6cd" + +[[projects]] + name = "github.com/gorilla/mux" + packages = ["."] + revision = "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + +[[projects]] + name = "github.com/gorilla/websocket" + packages = ["."] + revision = "a69d9f6de432e2c6b296a947d8a5ee88f68522cf" + +[[projects]] + name = "github.com/hashicorp/consul" + packages = ["api"] + revision = "3f92cc70e8163df866873c16c6d89889b5c95fc4" + +[[projects]] + name = "github.com/hashicorp/go-cleanhttp" + packages = ["."] + revision = "3573b8b52aa7b37b9358d966a898feb387f62437" + +[[projects]] + name = "github.com/hashicorp/go-version" + packages = ["."] + revision = "03c5bf6be031b6dd45afec16b1cf94fc8938bc77" + +[[projects]] + name = "github.com/hashicorp/serf" + packages = ["coordinate"] + revision = "19f2c401e122352c047a84d6584dd51e2fb8fcc4" + +[[projects]] + name = "github.com/huandu/xstrings" + packages = ["."] + revision = "3959339b333561bf62a38b424fd41517c2c90f40" + +[[projects]] + name = "github.com/imdario/mergo" + packages = ["."] + revision = "7fe0c75c13abdee74b09fcacef5ea1c6bba6a874" + version = "0.2.4" + +[[projects]] + name = "github.com/influxdata/influxdb" + packages = [ + "client/v2", + "models", + "pkg/escape" + ] + revision = "2d474a3089bcfce6b472779be9470a1f0ef3d5e4" + version = "v1.3.7" + +[[projects]] + branch = "master" + name = "github.com/jjcollinge/servicefabric" + packages = ["."] + revision = "8026935326c842b71dee8e2329c1fda41a7a92f4" + +[[projects]] + name = "github.com/jmespath/go-jmespath" + packages = ["."] + revision = "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d" + +[[projects]] + name = "github.com/jonboulle/clockwork" + packages = ["."] + revision = "72f9bd7c4e0c2a40055ab3d0f09654f730cce982" + +[[projects]] + name = "github.com/juju/ratelimit" + packages = ["."] + revision = "77ed1c8a01217656d2080ad51981f6e99adaa177" + +[[projects]] + branch = "master" + name = "github.com/kr/logfmt" + packages = ["."] + revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" + +[[projects]] + name = "github.com/libkermit/compose" + packages = [ + ".", + "check" + ] + revision = 
"4a33a16f1446ba205c4da7b09105d5bdc293b432" + +[[projects]] + name = "github.com/libkermit/docker" + packages = ["."] + revision = "ddede409294e8c5ae66d68ac09edb6b27e8f3e4a" + +[[projects]] + name = "github.com/libkermit/docker-check" + packages = ["."] + revision = "e0695005d6819191cf8969b479c94c40c8d22aa4" + +[[projects]] + name = "github.com/mailgun/minheap" + packages = ["."] + revision = "7c28d80e2ada649fc8ab1a37b86d30a2633bd47c" + +[[projects]] + name = "github.com/mailgun/timetools" + packages = ["."] + revision = "7e6055773c5137efbeb3bd2410d705fe10ab6bfd" + +[[projects]] + branch = "master" + name = "github.com/mailgun/ttlmap" + packages = ["."] + revision = "c1c17f74874f2a5ea48bfb06b5459d4ef2689749" + +[[projects]] + name = "github.com/mailru/easyjson" + packages = [ + "buffer", + "jlexer", + "jwriter" + ] + revision = "d5b7844b561a7bc640052f1b935f7b800330d7e0" + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "5411d3eea5978e6cdc258b30de592b60df6aba96" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "57fdcb988a5c543893cc61bce354a6e24ab70022" + +[[projects]] + name = "github.com/mattn/go-shellwords" + packages = ["."] + revision = "02e3cf038dcea8290e44424da473dd12be796a8a" + version = "v1.0.3" + +[[projects]] + branch = "master" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + +[[projects]] + name = "github.com/mesos/mesos-go" + packages = [ + "detector", + "detector/zoo", + "mesosproto", + "mesosutil", + "upid" + ] + revision = "068d5470506e3780189fe607af40892814197c5e" + +[[projects]] + branch = "master" + name = "github.com/mesosphere/mesos-dns" + packages = [ + "detect", + "errorutil", + "logging", + "models", + "records", + "records/labels", + "records/state", + "util" + ] + revision = "b47dc4c19f215e98da687b15b4c64e70f629bea5" + source = "https://github.com/containous/mesos-dns.git" + +[[projects]] + name = "github.com/miekg/dns" + packages = ["."] + revision = "8060d9f51305bbe024b99679454e62f552cd0b0b" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/copystructure" + packages = ["."] + revision = "d23ffcb85de31694d6ccaa23ccb4a03e55c1303f" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/hashstructure" + packages = ["."] + revision = "2bca23e0e452137f789efbc8610126fd8b94f73b" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + revision = "06020f85339e21b2478f756a78e295255ffa4d6a" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/reflectwalk" + packages = ["."] + revision = "63d60e9d0dbc60cf9164e6510889b0db6683d98c" + +[[projects]] + name = "github.com/mvdan/xurls" + packages = ["."] + revision = "db96455566f05ffe42bd6ac671f05eeb1152b45d" + +[[projects]] + branch = "master" + name = "github.com/ogier/pflag" + packages = ["."] + revision = "45c278ab3607870051a2ea9040bb85fcb8557481" + +[[projects]] + name = "github.com/opencontainers/go-digest" + packages = ["."] + revision = "a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb" + +[[projects]] + name = "github.com/opencontainers/image-spec" + packages = [ + "specs-go", + "specs-go/v1" + ] + revision = "f03dbe35d449c54915d235f1a3cf8f585a24babe" + +[[projects]] + name = "github.com/opencontainers/runc" + packages = [ + "libcontainer/system", + "libcontainer/user" + ] + revision = "b6b70e53451794e8333e9b602cc096b47a20bd0f" + +[[projects]] + name = "github.com/ovh/go-ovh" + 
packages = ["ovh"] + revision = "4b1fea467323b74c5f462f0947f402b428ca0626" + +[[projects]] + name = "github.com/pborman/uuid" + packages = ["."] + revision = "ca53cad383cad2479bbba7f7a1a05797ec1386e4" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "c605e284fe17294bda444b34710735b29d1a9d90" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "d8ed2627bdf02c080bf22230dbb337003b7aba2d" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp" + ] + revision = "08fd2e12372a66e68e30523c7642e0cbc3e4fbde" + +[[projects]] + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "6f3806018612930941127f2a7c6c453ba2c527d2" + +[[projects]] + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "49fee292b27bfff7f354ee0f64e1bc4850462edf" + +[[projects]] + name = "github.com/prometheus/procfs" + packages = [ + ".", + "xfs" + ] + revision = "a1dba9ce8baed984a2495b658c82687f8157b98f" + +[[projects]] + name = "github.com/rancher/go-rancher" + packages = ["v2"] + revision = "52e2f489534007ae843065468c5a1920d542afa4" + +[[projects]] + branch = "master" + name = "github.com/rancher/go-rancher-metadata" + packages = ["metadata"] + revision = "d2103caca5873119ff423d29cba09b4d03cd69b8" + +[[projects]] + branch = "master" + name = "github.com/ryanuber/go-glob" + packages = ["."] + revision = "256dc444b735e061061cf46c809487313d5b0065" + +[[projects]] + name = "github.com/samuel/go-zookeeper" + packages = ["zk"] + revision = "1d7be4effb13d2d908342d349d71a284a7542693" + +[[projects]] + name = "github.com/satori/go.uuid" + packages = ["."] + revision = "879c5887cd475cd7864858769793b2ceb0d44feb" + version = "v1.1.0" + +[[projects]] + name = "github.com/spf13/pflag" + packages = ["."] + revision = "cb88ea77998c3f024757528e3305022ab50b43be" + +[[projects]] + name = "github.com/stretchr/objx" + packages = ["."] + revision = "cbeaeb16a013161a98496fad62933b1d21786672" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "mock", + "require" + ] + revision = "4d4bfba8f1d1027c4fdbe371823030df51419987" + +[[projects]] + branch = "master" + name = "github.com/stvp/go-udp-testing" + packages = ["."] + revision = "c4434f09ec131ecf30f986d5dcb1636508bfa49a" + +[[projects]] + name = "github.com/thoas/stats" + packages = ["."] + revision = "152b5d051953fdb6e45f14b6826962aadc032324" + +[[projects]] + branch = "master" + name = "github.com/timewasted/linode" + packages = [ + ".", + "dns" + ] + revision = "37e84520dcf74488f67654f9c775b9752c232dc1" + +[[projects]] + name = "github.com/tv42/zbase32" + packages = ["."] + revision = "03389da7e0bf9844767f82690f4d68fc097a1306" + +[[projects]] + name = "github.com/ugorji/go" + packages = ["codec"] + revision = "ea9cd21fa0bc41ee4bdd50ac7ed8cbc7ea2ed960" + +[[projects]] + name = "github.com/unrolled/render" + packages = ["."] + revision = "50716a0a853771bb36bfce61a45cdefdb98c2e6e" + +[[projects]] + name = "github.com/unrolled/secure" + packages = ["."] + revision = "824e85271811af89640ea25620c67f6c2eed987e" + +[[projects]] + name = "github.com/urfave/negroni" + packages = ["."] + revision = "490e6a555d47ca891a89a150d0c1ef3922dfffe9" + +[[projects]] + name = "github.com/vdemeester/shakers" + packages = ["."] + revision = "24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3" + version = "v0.1.0" + +[[projects]] + branch = "containous-fork" + 
name = "github.com/vulcand/oxy" + packages = [ + "cbreaker", + "connlimit", + "forward", + "memmetrics", + "ratelimit", + "roundrobin", + "utils" + ] + revision = "812cebb8c764f2a78cb806267648b8728b4599ad" + source = "https://github.com/containous/oxy.git" + +[[projects]] + name = "github.com/vulcand/predicate" + packages = ["."] + revision = "19b9dde14240d94c804ae5736ad0e1de10bf8fe6" + +[[projects]] + name = "github.com/vulcand/route" + packages = ["."] + revision = "cb89d787ddbb1c5849a7ac9f79004c1fd12a4a32" + +[[projects]] + name = "github.com/vulcand/vulcand" + packages = [ + "conntracker", + "plugin", + "plugin/rewrite", + "router" + ] + revision = "42492a3a85e294bdbdd1bcabb8c12769a81ea284" + +[[projects]] + branch = "master" + name = "github.com/xeipuuv/gojsonpointer" + packages = ["."] + revision = "6fe8760cad3569743d51ddbb243b26f8456742dc" + +[[projects]] + branch = "master" + name = "github.com/xeipuuv/gojsonreference" + packages = ["."] + revision = "e02fc20de94c78484cd5ffb007f8af96be030a45" + +[[projects]] + name = "github.com/xeipuuv/gojsonschema" + packages = ["."] + revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70" + +[[projects]] + name = "github.com/xenolf/lego" + packages = [ + "acme", + "providers/dns", + "providers/dns/auroradns", + "providers/dns/azure", + "providers/dns/cloudflare", + "providers/dns/digitalocean", + "providers/dns/dnsimple", + "providers/dns/dnsmadeeasy", + "providers/dns/dnspod", + "providers/dns/dyn", + "providers/dns/exoscale", + "providers/dns/gandi", + "providers/dns/googlecloud", + "providers/dns/linode", + "providers/dns/namecheap", + "providers/dns/ns1", + "providers/dns/otc", + "providers/dns/ovh", + "providers/dns/pdns", + "providers/dns/rackspace", + "providers/dns/rfc2136", + "providers/dns/route53", + "providers/dns/vultr" + ] + revision = "67c86d860a797ce2483f50d9174d4ed24984bef2" + version = "v0.4.1" + +[[projects]] + name = "golang.org/x/crypto" + packages = [ + "bcrypt", + "blowfish", + "ocsp", + "pbkdf2", + "scrypt" + ] + revision = "4ed45ec682102c643324fae5dff8dab085b6c300" + +[[projects]] + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "lex/httplex", + "proxy", + "publicsuffix", + "trace", + "websocket" + ] + revision = "c8c74377599bd978aee1cf3b9b63a8634051cec2" + +[[projects]] + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt" + ] + revision = "7fdf09982454086d5570c7db3e11f360194830ca" + +[[projects]] + name = "golang.org/x/sys" + packages = [ + "unix", + "windows" + ] + revision = "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "cases", + "internal", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "runes", + "secure/bidirule", + "secure/precis", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + "width" + ] + revision = "4ee4af566555f5fbe026368b75596286a312663a" + +[[projects]] + name = "golang.org/x/time" + packages = ["rate"] + revision = "8be79e1e0910c292df4e79c241bb7e8f7e725959" + +[[projects]] + name = "google.golang.org/api" + packages = [ + "dns/v1", + "gensupport", + "googleapi", + "googleapi/internal/uritemplates" + ] + revision = "1575df15c1bb8b18ad4d9bc5ca495cc85b0764fe" + +[[projects]] + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + 
"internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch" + ] + revision = "4f7eeb5305a4ba1966344836ba4af9996b7b4e05" + +[[projects]] + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "09f6ed296fc66555a25fe4ce95173148778dfa85" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "codes", + "connectivity", + "credentials", + "grpclb/grpc_lb_v1", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "stats", + "status", + "tap", + "transport" + ] + revision = "b3ddf786825de56a4178401b7e174ee332173b66" + version = "v1.5.2" + +[[projects]] + name = "gopkg.in/fsnotify.v1" + packages = ["."] + revision = "629574ca2a5df945712d3079857300b5e4da0236" + version = "v1.4.2" + +[[projects]] + name = "gopkg.in/inf.v0" + packages = ["."] + revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" + version = "v0.9.0" + +[[projects]] + name = "gopkg.in/ini.v1" + packages = ["."] + revision = "5b3e00af70a9484542169a976dcab8d03e601a17" + version = "v1.30.0" + +[[projects]] + name = "gopkg.in/ns1/ns1-go.v2" + packages = [ + "rest", + "rest/model/account", + "rest/model/data", + "rest/model/dns", + "rest/model/filter", + "rest/model/monitor" + ] + revision = "c563826f4cbef9c11bebeb9f20a3f7afe9c1e2f4" + +[[projects]] + name = "gopkg.in/square/go-jose.v1" + packages = [ + ".", + "cipher", + "json" + ] + revision = "aa2e30fdd1fe9dd3394119af66451ae790d50e0d" + version = "v1.1.0" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "53feefa2559fb8dfa8d81baad31be332c97d6c77" + +[[projects]] + name = "k8s.io/client-go" + packages = [ + "discovery", + "kubernetes", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/certificates/v1alpha1", + "kubernetes/typed/core/v1", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/storage/v1beta1", + "pkg/api", + "pkg/api/errors", + "pkg/api/install", + "pkg/api/meta", + "pkg/api/meta/metatypes", + "pkg/api/resource", + "pkg/api/unversioned", + "pkg/api/v1", + "pkg/api/validation/path", + "pkg/apimachinery", + "pkg/apimachinery/announced", + "pkg/apimachinery/registered", + "pkg/apis/apps", + "pkg/apis/apps/install", + "pkg/apis/apps/v1beta1", + "pkg/apis/authentication", + "pkg/apis/authentication/install", + "pkg/apis/authentication/v1beta1", + "pkg/apis/authorization", + "pkg/apis/authorization/install", + "pkg/apis/authorization/v1beta1", + "pkg/apis/autoscaling", + "pkg/apis/autoscaling/install", + "pkg/apis/autoscaling/v1", + "pkg/apis/batch", + "pkg/apis/batch/install", + "pkg/apis/batch/v1", + "pkg/apis/batch/v2alpha1", + "pkg/apis/certificates", + "pkg/apis/certificates/install", + "pkg/apis/certificates/v1alpha1", + "pkg/apis/extensions", + "pkg/apis/extensions/install", + "pkg/apis/extensions/v1beta1", + "pkg/apis/policy", + "pkg/apis/policy/install", + "pkg/apis/policy/v1beta1", + "pkg/apis/rbac", + "pkg/apis/rbac/install", + "pkg/apis/rbac/v1alpha1", + "pkg/apis/storage", + "pkg/apis/storage/install", + "pkg/apis/storage/v1beta1", + "pkg/auth/user", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/genericapiserver/openapi/common", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", 
+ "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/third_party/forked/golang/reflect", + "pkg/third_party/forked/golang/template", + "pkg/types", + "pkg/util", + "pkg/util/cert", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/flowcontrol", + "pkg/util/framer", + "pkg/util/integer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/jsonpath", + "pkg/util/labels", + "pkg/util/net", + "pkg/util/parsers", + "pkg/util/rand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/uuid", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "pkg/watch/versioned", + "plugin/pkg/client/auth", + "plugin/pkg/client/auth/gcp", + "plugin/pkg/client/auth/oidc", + "rest", + "tools/cache", + "tools/clientcmd/api", + "tools/metrics", + "transport" + ] + revision = "e121606b0d09b2e1c467183ee46217fa85a6b672" + version = "v2.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "8cd40e70454298aa3ef967edf2c501aef87f1964b9e5cef3318f2c99fc5e620e" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..782f6cb89 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,191 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/sirupsen/logrus"] + +[[constraint]] + branch = "master" + name = "github.com/ArthurHlt/go-eureka-client" + +[[constraint]] + branch = "master" + name = "github.com/BurntSushi/toml" + +[[constraint]] + branch = "master" + name = "github.com/BurntSushi/ty" + +[[constraint]] + branch = "master" + name = "github.com/NYTimes/gziphandler" + +[[constraint]] + name = "github.com/abbot/go-http-auth" + version = "0.4.0" + +[[constraint]] + branch = "master" + name = "github.com/armon/go-proxyproto" + +[[constraint]] + name = "github.com/aws/aws-sdk-go" + version = "1.6.18" + +[[constraint]] + branch = "master" + name = "github.com/cenk/backoff" + +[[constraint]] + name = "github.com/containous/flaeg" + version = "1.0.1" + +[[constraint]] + branch = "master" + name = "github.com/containous/mux" + +[[constraint]] + name = "github.com/containous/staert" + version = "2.0.0" + +[[constraint]] + name = "github.com/containous/traefik-extra-service-fabric" + version = "1.0.5" + +[[constraint]] + name = "github.com/coreos/go-systemd" + version = "14.0.0" + +[[constraint]] + branch = "master" + name = "github.com/docker/leadership" + source = "github.com/containous/leadership" + +[[constraint]] + name = "github.com/docker/libkv" + source = "github.com/abronan/libkv" + +[[constraint]] + name = "github.com/eapache/channels" + version = "1.1.0" + +[[constraint]] + branch = "master" + name = "github.com/elazarl/go-bindata-assetfs" + +[[constraint]] + name = "github.com/go-check/check" + source = "github.com/containous/check" + +[[constraint]] + name = "github.com/go-kit/kit" + version = "0.3.0" + 
+[[constraint]] + name = "github.com/influxdata/influxdb" + version = "1.3.7" + +[[constraint]] + branch = "master" + name = "github.com/jjcollinge/servicefabric" + +[[constraint]] + name = "github.com/mattn/go-shellwords" + version = "1.0.3" + +[[constraint]] + name = "github.com/mesosphere/mesos-dns" + source = "https://github.com/containous/mesos-dns.git" + +[[constraint]] + branch = "master" + name = "github.com/mitchellh/copystructure" + +[[constraint]] + branch = "master" + name = "github.com/mitchellh/hashstructure" + +[[constraint]] + branch = "master" + name = "github.com/mitchellh/mapstructure" + +[[constraint]] + branch = "master" + name = "github.com/rancher/go-rancher-metadata" + +[[constraint]] + branch = "master" + name = "github.com/ryanuber/go-glob" + +[[constraint]] + name = "github.com/satori/go.uuid" + version = "1.1.0" + +[[constraint]] + branch = "master" + name = "github.com/stvp/go-udp-testing" + +[[constraint]] + name = "github.com/vdemeester/shakers" + version = "0.1.0" + +[[constraint]] + branch = "containous-fork" + name = "github.com/vulcand/oxy" + source = "https://github.com/containous/oxy.git" + +[[constraint]] + name = "github.com/xenolf/lego" + version = "0.4.1" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.5.2" + +[[constraint]] + name = "gopkg.in/fsnotify.v1" + version = "1.4.2" + +[[constraint]] + name = "k8s.io/client-go" + version = "2.0.0" + +[[override]] + name = "github.com/Nvveen/Gotty" + revision = "6018b68f96b839edfbe3fb48668853f5dbad88a3" + source = "github.com/ijc25/Gotty" + +[[override]] + name = "github.com/gorilla/websocket" + revision = "a69d9f6de432e2c6b296a947d8a5ee88f68522cf" + +[[override]] + # always keep this override + name = "github.com/mailgun/timetools" + revision = "7e6055773c5137efbeb3bd2410d705fe10ab6bfd" + +[[override]] + name = "github.com/vulcand/predicate" + revision = "19b9dde14240d94c804ae5736ad0e1de10bf8fe6" + +[[override]] + # remove override on master + name = "github.com/coreos/bbolt" + revision = "32c383e75ce054674c53b5a07e55de85332aee14" diff --git a/Makefile b/Makefile index 967a9e175..9683dc92d 100644 --- a/Makefile +++ b/Makefile @@ -74,7 +74,7 @@ test-integration: build ## run the integration tests TEST_HOST=1 ./script/make.sh test-integration validate: build ## validate gofmt, golint and go vet - $(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-glide validate-gofmt validate-govet validate-golint validate-misspell validate-vendor validate-autogen + $(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-gofmt validate-govet validate-golint validate-misspell validate-vendor validate-autogen build: dist docker build $(DOCKER_BUILD_ARGS) -t "$(TRAEFIK_DEV_IMAGE)" -f build.Dockerfile . @@ -127,5 +127,8 @@ fmt: pull-images: grep --no-filename -E '^\s+image:' ./integration/resources/compose/*.yml | awk '{print $$2}' | sort | uniq | xargs -P 6 -n 1 docker pull +prune-dep: + ./script/prune-dep.sh + help: ## this help @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) diff --git a/build.Dockerfile b/build.Dockerfile index d85036b9c..6d2e4e81e 100644 --- a/build.Dockerfile +++ b/build.Dockerfile @@ -7,20 +7,18 @@ RUN apk --update upgrade \ RUN go get github.com/jteeuwen/go-bindata/... 
\ && go get github.com/golang/lint/golint \ && go get github.com/kisielk/errcheck \ -&& go get github.com/client9/misspell/cmd/misspell \ -&& go get github.com/mattfarina/glide-hash \ -&& go get github.com/sgotti/glide-vc +&& go get github.com/client9/misspell/cmd/misspell # Which docker version to test on ARG DOCKER_VERSION=17.03.2 +ARG DEP_VERSION=0.3.2 -# Which glide version to test on -ARG GLIDE_VERSION=v0.12.3 - -# Download glide +# Download dep binary to bin folder in $GOPATH RUN mkdir -p /usr/local/bin \ - && curl -fL https://github.com/Masterminds/glide/releases/download/${GLIDE_VERSION}/glide-${GLIDE_VERSION}-linux-amd64.tar.gz \ - | tar -xzC /usr/local/bin --transform 's#^.+/##x' + && curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 \ + && chmod +x /usr/local/bin/dep + + # Download docker RUN mkdir -p /usr/local/bin \ diff --git a/glide.lock b/glide.lock deleted file mode 100644 index 371ad536f..000000000 --- a/glide.lock +++ /dev/null @@ -1,860 +0,0 @@ -hash: cccbfefb9183eb5425bfd242e4ae673d5a2acfa30ce83da377fd795ebcecf315 -updated: 2017-12-15T10:34:41.246378337+01:00 -imports: -- name: cloud.google.com/go - version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c - subpackages: - - compute/metadata - - internal -- name: github.com/abbot/go-http-auth - version: 0ddd408d5d60ea76e320503cc7dd091992dee608 -- name: github.com/aokoli/goutils - version: 3391d3790d23d03408670993e957e8f408993c34 -- name: github.com/armon/go-proxyproto - version: 48572f11356f1843b694f21a290d4f1006bc5e47 -- name: github.com/ArthurHlt/go-eureka-client - version: 9d0a49cbd39aa3634ae1977e9f519a262b10adaf - subpackages: - - eureka -- name: github.com/ArthurHlt/gominlog - version: 72eebf980f467d3ab3a8b4ddf660f664911ce519 -- name: github.com/aws/aws-sdk-go - version: 3f8f870ec9939e32b3372abf74d24e468bcd285d - subpackages: - - aws - - aws/awserr - - aws/awsutil - - aws/client - - aws/client/metadata - - aws/corehandlers - - aws/credentials - - aws/credentials/ec2rolecreds - - aws/credentials/endpointcreds - - aws/credentials/stscreds - - aws/defaults - - aws/ec2metadata - - aws/endpoints - - aws/request - - aws/session - - aws/signer/v4 - - private/protocol - - private/protocol/ec2query - - private/protocol/json/jsonutil - - private/protocol/jsonrpc - - private/protocol/query - - private/protocol/query/queryutil - - private/protocol/rest - - private/protocol/restxml - - private/protocol/xml/xmlutil - - private/waiter - - service/dynamodb - - service/dynamodb/dynamodbattribute - - service/dynamodb/dynamodbiface - - service/dynamodbattribute - - service/ec2 - - service/ecs - - service/route53 - - service/sts -- name: github.com/Azure/azure-sdk-for-go - version: f7bb4db3ea4c73dc58bd284c38ea644a79324be0 - subpackages: - - arm/dns -- name: github.com/Azure/go-autorest - version: f6be1abbb5abd0517522f850dd785990d373da7e - subpackages: - - autorest - - autorest/adal - - autorest/azure - - autorest/date - - autorest/to -- name: github.com/beorn7/perks - version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 - subpackages: - - quantile -- name: github.com/blang/semver - version: 31b736133b98f26d5e078ec9eb591666edfd091f -- name: github.com/boltdb/bolt - version: e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd -- name: github.com/BurntSushi/toml - version: b26d9c308763d68093482582cea63d69be07a0f0 -- name: github.com/BurntSushi/ty - version: 6add9cd6ad42d389d6ead1dde60b4ad71e46fd74 - subpackages: - - fun -- name: github.com/cenk/backoff - version: 
5d150e7eec023ce7a124856b37c68e54b4050ac7 -- name: github.com/codahale/hdrhistogram - version: 9208b142303c12d8899bae836fd524ac9338b4fd -- name: github.com/codegangsta/cli - version: bf4a526f48af7badd25d2cb02d587e1b01be3b50 -- name: github.com/containous/flaeg - version: 60c87a513a955ca7225e1b1c772581cea8420cb4 -- name: github.com/containous/mux - version: 06ccd3e75091eb659b1d720cda0e16bc7057954c -- name: github.com/containous/staert - version: af517d5b70db9c4b0505e0144fcc62b054057d2a -- name: github.com/containous/traefik-extra-service-fabric - version: ca1fb57108293caad285b1c366b763f6c6ab71c9 -- name: github.com/coreos/bbolt - version: 3c6cbfb299c11444eb2f8c9d48f0d2ce09157423 -- name: github.com/coreos/etcd - version: f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b - subpackages: - - auth/authpb - - client - - clientv3 - - clientv3/concurrency - - etcdserver/api/v3rpc/rpctypes - - etcdserver/etcdserverpb - - mvcc/mvccpb - - pkg/pathutil - - pkg/srv - - pkg/types - - version -- name: github.com/coreos/go-oidc - version: 5644a2f50e2d2d5ba0b474bc5bc55fea1925936d - subpackages: - - http - - jose - - key - - oauth2 - - oidc -- name: github.com/coreos/go-semver - version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6 - subpackages: - - semver -- name: github.com/coreos/go-systemd - version: 48702e0da86bd25e76cfef347e2adeb434a0d0a6 - subpackages: - - daemon -- name: github.com/coreos/pkg - version: fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 - subpackages: - - health - - httputil - - timeutil -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- name: github.com/decker502/dnspod-go - version: f33a2c6040fc2550a631de7b3a53bddccdcd73fb -- name: github.com/dgrijalva/jwt-go - version: d2709f9f1f31ebcda9651b03077758c1f3a0018c -- name: github.com/dnsimple/dnsimple-go - version: f2d9b723cc9547d182e24ac2e527ae25d25fc93f - subpackages: - - dnsimple -- name: github.com/docker/distribution - version: b38e5838b7b2f2ad48e06ec4b500011976080621 - subpackages: - - context - - digestset - - reference - - registry/api/errcode - - registry/api/v2 - - registry/client - - registry/client/auth - - registry/client/auth/challenge - - registry/client/transport - - registry/storage/cache - - registry/storage/cache/memory - - uuid -- name: github.com/docker/docker - version: 75c7536d2e2e328b644bf69153de879d1d197988 - subpackages: - - api - - api/types - - api/types/blkiodev - - api/types/container - - api/types/events - - api/types/filters - - api/types/image - - api/types/mount - - api/types/network - - api/types/registry - - api/types/strslice - - api/types/swarm - - api/types/time - - api/types/versions - - api/types/volume - - builder/dockerignore - - client - - opts - - pkg/archive - - pkg/fileutils - - pkg/gitutils - - pkg/homedir - - pkg/httputils - - pkg/idtools - - pkg/ioutils - - pkg/jsonlog - - pkg/jsonmessage - - pkg/longpath - - pkg/mount - - pkg/namesgenerator - - pkg/pools - - pkg/progress - - pkg/promise - - pkg/random - - pkg/stdcopy - - pkg/streamformatter - - pkg/stringid - - pkg/symlink - - pkg/system - - pkg/tarsum - - pkg/term - - pkg/term/windows - - pkg/tlsconfig - - pkg/urlutil - - registry - - runconfig/opts -- name: github.com/docker/go-connections - version: e15c02316c12de00874640cd76311849de2aeed5 - subpackages: - - nat - - sockets - - tlsconfig -- name: github.com/docker/go-units - version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -- name: github.com/docker/leadership - version: af20da7d3e62be9259835e93261acf931b5adecf - repo: 
https://github.com/containous/leadership.git - vcs: git -- name: github.com/docker/libkv - version: 5e4bb288a9a74320bb03f5c18d6bdbab0d8049de - repo: https://github.com/abronan/libkv.git - vcs: git - subpackages: - - store - - store/boltdb - - store/consul - - store/etcd - - store/etcd/v2 - - store/etcd/v3 - - store/zookeeper -- name: github.com/docker/libtrust - version: 9cbd2a1374f46905c68a4eb3694a130610adc62a -- name: github.com/donovanhide/eventsource - version: b8f31a59085e69dd2678cf51840db2ac625cb741 -- name: github.com/eapache/channels - version: 47238d5aae8c0fefd518ef2bee46290909cf8263 -- name: github.com/eapache/queue - version: 44cc805cf13205b55f69e14bcb69867d1ae92f98 -- name: github.com/edeckers/auroradnsclient - version: 398f53855ba258191157e20fabfaccca5e13cea9 - subpackages: - - records - - requests - - requests/errors - - tokens - - zones -- name: github.com/elazarl/go-bindata-assetfs - version: 30f82fa23fd844bd5bb1e5f216db87fd77b5eb43 -- name: github.com/emicklei/go-restful - version: 89ef8af493ab468a45a42bb0d89a06fccdd2fb22 - subpackages: - - log - - swagger -- name: github.com/exoscale/egoscale - version: 325740036187ddae3a5b74be00fbbc70011c4d96 -- name: github.com/fatih/color - version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 -- name: github.com/gambol99/go-marathon - version: 03b46169666c53b9cc953b875ac5714e5103e064 -- name: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee -- name: github.com/go-ini/ini - version: f384f410798cbe7cdce40eec40b79ed32bb4f1ad -- name: github.com/go-kit/kit - version: f66b0e13579bfc5a48b9e2a94b1209c107ea1f41 - subpackages: - - log - - metrics - - metrics/dogstatsd - - metrics/generic - - metrics/influx - - metrics/internal/lv - - metrics/internal/ratemap - - metrics/multi - - metrics/prometheus - - metrics/statsd - - util/conn -- name: github.com/go-logfmt/logfmt - version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: github.com/go-openapi/jsonpointer - version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 -- name: github.com/go-openapi/jsonreference - version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 -- name: github.com/go-openapi/spec - version: 6aced65f8501fe1217321abf0749d354824ba2ff -- name: github.com/go-openapi/swag - version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72 -- name: github.com/go-stack/stack - version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b -- name: github.com/gogo/protobuf - version: 909568be09de550ed094403c2bf8a261b5bb730a - subpackages: - - proto - - sortkeys -- name: github.com/golang/glog - version: 44145f04b68cf362d9c4df2182967c2275eaefed -- name: github.com/golang/protobuf - version: 4bd1920723d7b7c925de087aa32e2187708897f7 - subpackages: - - jsonpb - - proto - - ptypes/any -- name: github.com/google/go-github - version: fe7d11f8add400587b6718d9f39a62e42cb04c28 - subpackages: - - github -- name: github.com/google/go-querystring - version: 53e6ce116135b80d037921a7fdd5138cf32d7a8a - subpackages: - - query -- name: github.com/google/gofuzz - version: bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5 -- name: github.com/googleapis/gax-go - version: 9af46dd5a1713e8b5cd71106287eba3cefdde50b -- name: github.com/gorilla/context - version: 215affda49addc4c8ef7e2534915df2c8c35c6cd -- name: github.com/gorilla/websocket - version: a69d9f6de432e2c6b296a947d8a5ee88f68522cf -- name: github.com/hashicorp/consul - version: 3f92cc70e8163df866873c16c6d89889b5c95fc4 - subpackages: - - api -- name: github.com/hashicorp/go-cleanhttp - version: 3573b8b52aa7b37b9358d966a898feb387f62437 -- name: 
github.com/hashicorp/go-version - version: 03c5bf6be031b6dd45afec16b1cf94fc8938bc77 -- name: github.com/hashicorp/serf - version: 19f2c401e122352c047a84d6584dd51e2fb8fcc4 - subpackages: - - coordinate -- name: github.com/huandu/xstrings - version: 3959339b333561bf62a38b424fd41517c2c90f40 -- name: github.com/imdario/mergo - version: 7fe0c75c13abdee74b09fcacef5ea1c6bba6a874 -- name: github.com/influxdata/influxdb - version: 2d474a3089bcfce6b472779be9470a1f0ef3d5e4 - subpackages: - - client/v2 - - models - - pkg/escape -- name: github.com/JamesClonk/vultr - version: 2fd0705ce648e602e6c9c57329a174270a4f6688 - subpackages: - - lib -- name: github.com/jjcollinge/servicefabric - version: 8026935326c842b71dee8e2329c1fda41a7a92f4 -- name: github.com/jmespath/go-jmespath - version: bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -- name: github.com/jonboulle/clockwork - version: 72f9bd7c4e0c2a40055ab3d0f09654f730cce982 -- name: github.com/juju/ratelimit - version: 77ed1c8a01217656d2080ad51981f6e99adaa177 -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/mailgun/minheap - version: 7c28d80e2ada649fc8ab1a37b86d30a2633bd47c -- name: github.com/mailgun/timetools - version: 7e6055773c5137efbeb3bd2410d705fe10ab6bfd -- name: github.com/mailgun/ttlmap - version: c1c17f74874f2a5ea48bfb06b5459d4ef2689749 -- name: github.com/mailru/easyjson - version: d5b7844b561a7bc640052f1b935f7b800330d7e0 - subpackages: - - buffer - - jlexer - - jwriter -- name: github.com/Masterminds/semver - version: 59c29afe1a994eacb71c833025ca7acf874bb1da -- name: github.com/Masterminds/sprig - version: e039e20e500c2c025d9145be375e27cf42a94174 -- name: github.com/mattn/go-colorable - version: 5411d3eea5978e6cdc258b30de592b60df6aba96 - repo: https://github.com/mattn/go-colorable -- name: github.com/mattn/go-isatty - version: 57fdcb988a5c543893cc61bce354a6e24ab70022 - repo: https://github.com/mattn/go-isatty -- name: github.com/mattn/go-shellwords - version: 02e3cf038dcea8290e44424da473dd12be796a8a -- name: github.com/matttproud/golang_protobuf_extensions - version: c12348ce28de40eed0136aa2b644d0ee0650e56c - subpackages: - - pbutil -- name: github.com/mesos/mesos-go - version: 068d5470506e3780189fe607af40892814197c5e - subpackages: - - detector - - detector/zoo - - mesos - - mesosproto - - mesosutil - - upid -- name: github.com/mesosphere/mesos-dns - version: b47dc4c19f215e98da687b15b4c64e70f629bea5 - repo: https://github.com/containous/mesos-dns.git - vcs: git - subpackages: - - detect - - errorutil - - logging - - models - - records - - records/labels - - records/state - - util -- name: github.com/Microsoft/go-winio - version: f533f7a102197536779ea3a8cb881d639e21ec5a -- name: github.com/miekg/dns - version: 8060d9f51305bbe024b99679454e62f552cd0b0b -- name: github.com/mitchellh/copystructure - version: d23ffcb85de31694d6ccaa23ccb4a03e55c1303f -- name: github.com/mitchellh/hashstructure - version: 2bca23e0e452137f789efbc8610126fd8b94f73b -- name: github.com/mitchellh/mapstructure - version: d0303fe809921458f417bcf828397a65db30a7e4 -- name: github.com/mitchellh/reflectwalk - version: 63d60e9d0dbc60cf9164e6510889b0db6683d98c -- name: github.com/mvdan/xurls - version: db96455566f05ffe42bd6ac671f05eeb1152b45d -- name: github.com/Nvveen/Gotty - version: 6018b68f96b839edfbe3fb48668853f5dbad88a3 - repo: https://github.com/ijc25/Gotty.git - vcs: git -- name: github.com/NYTimes/gziphandler - version: 47ca22a0aeea4c9ceddfb935d818d636d934c312 -- name: github.com/ogier/pflag - version: 
45c278ab3607870051a2ea9040bb85fcb8557481 -- name: github.com/opencontainers/go-digest - version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb -- name: github.com/opencontainers/image-spec - version: f03dbe35d449c54915d235f1a3cf8f585a24babe - subpackages: - - specs-go - - specs-go/v1 -- name: github.com/ovh/go-ovh - version: 4b1fea467323b74c5f462f0947f402b428ca0626 - subpackages: - - ovh -- name: github.com/pborman/uuid - version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 -- name: github.com/pkg/errors - version: c605e284fe17294bda444b34710735b29d1a9d90 -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/prometheus/client_golang - version: 08fd2e12372a66e68e30523c7642e0cbc3e4fbde - subpackages: - - prometheus - - prometheus/promhttp -- name: github.com/prometheus/client_model - version: 6f3806018612930941127f2a7c6c453ba2c527d2 - subpackages: - - go -- name: github.com/prometheus/common - version: 49fee292b27bfff7f354ee0f64e1bc4850462edf - subpackages: - - expfmt - - internal/bitbucket.org/ww/goautoneg - - model -- name: github.com/prometheus/procfs - version: a1dba9ce8baed984a2495b658c82687f8157b98f - subpackages: - - xfs -- name: github.com/PuerkitoBio/purell - version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 -- name: github.com/PuerkitoBio/urlesc - version: 5bd2802263f21d8788851d5305584c82a5c75d7e -- name: github.com/rancher/go-rancher - version: 52e2f489534007ae843065468c5a1920d542afa4 - subpackages: - - v2 -- name: github.com/rancher/go-rancher-metadata - version: d2103caca5873119ff423d29cba09b4d03cd69b8 - subpackages: - - metadata -- name: github.com/ryanuber/go-glob - version: 256dc444b735e061061cf46c809487313d5b0065 -- name: github.com/samuel/go-zookeeper - version: 1d7be4effb13d2d908342d349d71a284a7542693 - subpackages: - - zk -- name: github.com/satori/go.uuid - version: 879c5887cd475cd7864858769793b2ceb0d44feb -- name: github.com/Sirupsen/logrus - version: 10f801ebc38b33738c9d17d50860f484a0988ff5 -- name: github.com/spf13/pflag - version: cb88ea77998c3f024757528e3305022ab50b43be -- name: github.com/stretchr/objx - version: cbeaeb16a013161a98496fad62933b1d21786672 -- name: github.com/stretchr/testify - version: 4d4bfba8f1d1027c4fdbe371823030df51419987 - subpackages: - - assert - - mock - - require -- name: github.com/thoas/stats - version: 152b5d051953fdb6e45f14b6826962aadc032324 -- name: github.com/timewasted/linode - version: 37e84520dcf74488f67654f9c775b9752c232dc1 - subpackages: - - dns -- name: github.com/tv42/zbase32 - version: 03389da7e0bf9844767f82690f4d68fc097a1306 -- name: github.com/ugorji/go - version: ea9cd21fa0bc41ee4bdd50ac7ed8cbc7ea2ed960 - subpackages: - - codec -- name: github.com/unrolled/render - version: 50716a0a853771bb36bfce61a45cdefdb98c2e6e -- name: github.com/unrolled/secure - version: 824e85271811af89640ea25620c67f6c2eed987e -- name: github.com/urfave/negroni - version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9 -- name: github.com/VividCortex/gohistogram - version: 51564d9861991fb0ad0f531c99ef602d0f9866e6 -- name: github.com/vulcand/oxy - version: 812cebb8c764f2a78cb806267648b8728b4599ad - repo: https://github.com/containous/oxy.git - vcs: git - subpackages: - - cbreaker - - connlimit - - forward - - memmetrics - - ratelimit - - roundrobin - - stream - - utils -- name: github.com/vulcand/predicate - version: 19b9dde14240d94c804ae5736ad0e1de10bf8fe6 -- name: github.com/vulcand/route - version: cb89d787ddbb1c5849a7ac9f79004c1fd12a4a32 -- name: github.com/vulcand/vulcand - 
version: 42492a3a85e294bdbdd1bcabb8c12769a81ea284 - subpackages: - - conntracker - - plugin - - plugin/rewrite - - router -- name: github.com/xenolf/lego - version: 67c86d860a797ce2483f50d9174d4ed24984bef2 - subpackages: - - acme - - providers/dns - - providers/dns/auroradns - - providers/dns/azure - - providers/dns/cloudflare - - providers/dns/digitalocean - - providers/dns/dnsimple - - providers/dns/dnsmadeeasy - - providers/dns/dnspod - - providers/dns/dyn - - providers/dns/exoscale - - providers/dns/gandi - - providers/dns/googlecloud - - providers/dns/linode - - providers/dns/namecheap - - providers/dns/ns1 - - providers/dns/otc - - providers/dns/ovh - - providers/dns/pdns - - providers/dns/rackspace - - providers/dns/rfc2136 - - providers/dns/route53 - - providers/dns/vultr -- name: golang.org/x/crypto - version: 4ed45ec682102c643324fae5dff8dab085b6c300 - subpackages: - - bcrypt - - blowfish - - ocsp - - pbkdf2 - - scrypt -- name: golang.org/x/net - version: c8c74377599bd978aee1cf3b9b63a8634051cec2 - subpackages: - - context - - context/ctxhttp - - http2 - - http2/hpack - - idna - - internal/timeseries - - lex/httplex - - proxy - - publicsuffix - - trace - - websocket -- name: golang.org/x/oauth2 - version: 7fdf09982454086d5570c7db3e11f360194830ca - subpackages: - - google - - internal - - jws - - jwt -- name: golang.org/x/sys - version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 - subpackages: - - unix - - windows -- name: golang.org/x/text - version: 4ee4af566555f5fbe026368b75596286a312663a - subpackages: - - cases - - internal - - internal/tag - - language - - runes - - secure/bidirule - - secure/precis - - transform - - unicode/bidi - - unicode/norm - - width -- name: golang.org/x/time - version: 8be79e1e0910c292df4e79c241bb7e8f7e725959 - subpackages: - - rate -- name: google.golang.org/api - version: 1575df15c1bb8b18ad4d9bc5ca495cc85b0764fe - subpackages: - - dns/v1 - - gensupport - - googleapi - - googleapi/internal/uritemplates -- name: google.golang.org/appengine - version: 4f7eeb5305a4ba1966344836ba4af9996b7b4e05 - subpackages: - - internal - - internal/app_identity - - internal/base - - internal/datastore - - internal/log - - internal/modules - - internal/remote_api - - internal/urlfetch - - urlfetch -- name: google.golang.org/genproto - version: 09f6ed296fc66555a25fe4ce95173148778dfa85 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: b3ddf786825de56a4178401b7e174ee332173b66 - subpackages: - - codes - - connectivity - - credentials - - grpclb/grpc_lb_v1 - - grpclog - - internal - - keepalive - - metadata - - naming - - peer - - stats - - status - - tap - - transport -- name: gopkg.in/fsnotify.v1 - version: 629574ca2a5df945712d3079857300b5e4da0236 -- name: gopkg.in/inf.v0 - version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 -- name: gopkg.in/ini.v1 - version: 5b3e00af70a9484542169a976dcab8d03e601a17 -- name: gopkg.in/ns1/ns1-go.v2 - version: c563826f4cbef9c11bebeb9f20a3f7afe9c1e2f4 - subpackages: - - rest - - rest/model/account - - rest/model/data - - rest/model/dns - - rest/model/filter - - rest/model/monitor -- name: gopkg.in/square/go-jose.v1 - version: aa2e30fdd1fe9dd3394119af66451ae790d50e0d - subpackages: - - cipher - - json -- name: gopkg.in/yaml.v2 - version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 -- name: k8s.io/client-go - version: e121606b0d09b2e1c467183ee46217fa85a6b672 - subpackages: - - discovery - - kubernetes - - kubernetes/typed/apps/v1beta1 - - kubernetes/typed/authentication/v1beta1 - - 
kubernetes/typed/authorization/v1beta1 - - kubernetes/typed/autoscaling/v1 - - kubernetes/typed/batch/v1 - - kubernetes/typed/batch/v2alpha1 - - kubernetes/typed/certificates/v1alpha1 - - kubernetes/typed/core/v1 - - kubernetes/typed/extensions/v1beta1 - - kubernetes/typed/policy/v1beta1 - - kubernetes/typed/rbac/v1alpha1 - - kubernetes/typed/storage/v1beta1 - - pkg/api - - pkg/api/errors - - pkg/api/install - - pkg/api/meta - - pkg/api/meta/metatypes - - pkg/api/resource - - pkg/api/unversioned - - pkg/api/v1 - - pkg/api/validation/path - - pkg/apimachinery - - pkg/apimachinery/announced - - pkg/apimachinery/registered - - pkg/apis/apps - - pkg/apis/apps/install - - pkg/apis/apps/v1beta1 - - pkg/apis/authentication - - pkg/apis/authentication/install - - pkg/apis/authentication/v1beta1 - - pkg/apis/authorization - - pkg/apis/authorization/install - - pkg/apis/authorization/v1beta1 - - pkg/apis/autoscaling - - pkg/apis/autoscaling/install - - pkg/apis/autoscaling/v1 - - pkg/apis/batch - - pkg/apis/batch/install - - pkg/apis/batch/v1 - - pkg/apis/batch/v2alpha1 - - pkg/apis/certificates - - pkg/apis/certificates/install - - pkg/apis/certificates/v1alpha1 - - pkg/apis/extensions - - pkg/apis/extensions/install - - pkg/apis/extensions/v1beta1 - - pkg/apis/policy - - pkg/apis/policy/install - - pkg/apis/policy/v1beta1 - - pkg/apis/rbac - - pkg/apis/rbac/install - - pkg/apis/rbac/v1alpha1 - - pkg/apis/storage - - pkg/apis/storage/install - - pkg/apis/storage/v1beta1 - - pkg/auth/user - - pkg/conversion - - pkg/conversion/queryparams - - pkg/fields - - pkg/genericapiserver/openapi/common - - pkg/labels - - pkg/runtime - - pkg/runtime/serializer - - pkg/runtime/serializer/json - - pkg/runtime/serializer/protobuf - - pkg/runtime/serializer/recognizer - - pkg/runtime/serializer/streaming - - pkg/runtime/serializer/versioning - - pkg/selection - - pkg/third_party/forked/golang/reflect - - pkg/third_party/forked/golang/template - - pkg/types - - pkg/util - - pkg/util/cert - - pkg/util/clock - - pkg/util/diff - - pkg/util/errors - - pkg/util/flowcontrol - - pkg/util/framer - - pkg/util/integer - - pkg/util/intstr - - pkg/util/json - - pkg/util/jsonpath - - pkg/util/labels - - pkg/util/net - - pkg/util/parsers - - pkg/util/rand - - pkg/util/runtime - - pkg/util/sets - - pkg/util/uuid - - pkg/util/validation - - pkg/util/validation/field - - pkg/util/wait - - pkg/util/yaml - - pkg/version - - pkg/watch - - pkg/watch/versioned - - plugin/pkg/client/auth - - plugin/pkg/client/auth/gcp - - plugin/pkg/client/auth/oidc - - rest - - tools/cache - - tools/clientcmd/api - - tools/metrics - - transport -testImports: -- name: github.com/Azure/go-ansiterm - version: d6e3b3328b783f23731bc4d058875b0371ff8109 - subpackages: - - winterm -- name: github.com/docker/cli - version: d95fd2f38cfc23e077530c6181330727d561b6a0 - subpackages: - - cli/command/image/build - - cli/config - - cli/config/configfile -- name: github.com/docker/libcompose - version: 1b708aac26a4fc6f9bff31728a8e3a252ef57dbd - subpackages: - - config - - docker - - docker/auth - - docker/builder - - docker/client - - docker/container - - docker/ctx - - docker/image - - docker/network - - docker/service - - docker/volume - - labels - - logger - - lookup - - project - - project/events - - project/options - - utils - - version - - yaml -- name: github.com/flynn/go-shlex - version: 3f9db97f856818214da2e1057f8ad84803971cff -- name: github.com/go-check/check - version: ca0bf163426aa183d03fd4949101785c0347f273 - repo: https://github.com/containous/check.git - 
vcs: git -- name: github.com/gorilla/mux - version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf -- name: github.com/libkermit/compose - version: 4a33a16f1446ba205c4da7b09105d5bdc293b432 - subpackages: - - check -- name: github.com/libkermit/docker - version: ddede409294e8c5ae66d68ac09edb6b27e8f3e4a -- name: github.com/libkermit/docker-check - version: e0695005d6819191cf8969b479c94c40c8d22aa4 -- name: github.com/opencontainers/runc - version: b6b70e53451794e8333e9b602cc096b47a20bd0f - subpackages: - - libcontainer/system - - libcontainer/user -- name: github.com/stvp/go-udp-testing - version: c4434f09ec131ecf30f986d5dcb1636508bfa49a -- name: github.com/vdemeester/shakers - version: 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -- name: github.com/xeipuuv/gojsonpointer - version: 6fe8760cad3569743d51ddbb243b26f8456742dc -- name: github.com/xeipuuv/gojsonreference - version: e02fc20de94c78484cd5ffb007f8af96be030a45 -- name: github.com/xeipuuv/gojsonschema - version: 0c8571ac0ce161a5feb57375a9cdf148c98c0f70 diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index 4baffe125..000000000 --- a/glide.yaml +++ /dev/null @@ -1,240 +0,0 @@ -package: github.com/containous/traefik -ignore: -- github.com/sirupsen/logrus -import: -- package: github.com/BurntSushi/toml - version: v0.3.0 -- package: github.com/BurntSushi/ty - subpackages: - - fun -- package: github.com/Sirupsen/logrus - version: 10f801ebc38b33738c9d17d50860f484a0988ff5 -- package: github.com/cenk/backoff -- package: github.com/containous/flaeg -- package: github.com/containous/traefik-extra-service-fabric - version: v1.0.5 -- package: github.com/vulcand/oxy - version: 812cebb8c764f2a78cb806267648b8728b4599ad - repo: https://github.com/containous/oxy.git - vcs: git - subpackages: - - cbreaker - - connlimit - - forward - - roundrobin - - stream - - utils - - ratelimit -- package: github.com/urfave/negroni - version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9 -- package: github.com/containous/staert - version: ^v2.0.0 -- package: github.com/docker/docker - version: 75c7536d2e2e328b644bf69153de879d1d197988 -- package: github.com/docker/go-connections - version: e15c02316c12de00874640cd76311849de2aeed5 - subpackages: - - sockets - - tlsconfig -- package: github.com/docker/go-units - version: 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -- package: github.com/coreos/etcd - version: v3.2.9 -- package: github.com/docker/libkv - repo: https://github.com/abronan/libkv.git - vcs: git - subpackages: - - store - - store/boltdb - - store/consul - - store/etcd/v2 - - store/etcd/v3 - - store/zookeeper -- package: github.com/elazarl/go-bindata-assetfs -- package: github.com/containous/mux -- package: github.com/hashicorp/consul - subpackages: - - api -- package: github.com/thoas/stats - version: 152b5d051953fdb6e45f14b6826962aadc032324 -- package: github.com/unrolled/render -- package: github.com/vulcand/vulcand - version: 42492a3a85e294bdbdd1bcabb8c12769a81ea284 - subpackages: - - plugin/rewrite -- package: github.com/vulcand/predicate - version: 19b9dde14240d94c804ae5736ad0e1de10bf8fe6 -- package: github.com/xenolf/lego - version: 67c86d860a797ce2483f50d9174d4ed24984bef2 - subpackages: - - acme -- package: gopkg.in/fsnotify.v1 -- package: github.com/mattn/go-shellwords -- package: github.com/ryanuber/go-glob -- package: github.com/mesos/mesos-go - subpackages: - - mesosproto - - mesos - - upid - - mesosutil - - detector -- package: github.com/miekg/dns - version: 8060d9f51305bbe024b99679454e62f552cd0b0b -- package: github.com/mesosphere/mesos-dns - version: 
b47dc4c19f215e98da687b15b4c64e70f629bea5 - repo: https://github.com/containous/mesos-dns.git - vcs: git -- package: github.com/abbot/go-http-auth -- package: github.com/NYTimes/gziphandler -- package: github.com/docker/leadership - repo: https://github.com/containous/leadership.git - vcs: git -- package: github.com/satori/go.uuid - version: ^1.1.0 -- package: k8s.io/client-go - version: v2.0.0 -- package: github.com/influxdata/influxdb - version: v1.3.7 - subpackages: - - client/v2 -- package: github.com/gambol99/go-marathon - version: dd6cbd4c2d71294a19fb89158f2a00d427f174ab -- package: github.com/ArthurHlt/go-eureka-client - subpackages: - - eureka -- package: github.com/coreos/go-systemd - version: v14 - subpackages: - - daemon -- package: github.com/google/go-github -- package: github.com/hashicorp/go-version -- package: github.com/mvdan/xurls -- package: github.com/go-kit/kit - version: v0.3.0 - subpackages: - - log - - metrics - - metrics/dogstatsd - - metrics/internal/lv - - metrics/internal/ratemap - - metrics/multi - - metrics/prometheus - - metrics/statsd - - util/conn - - metrics/influx -- package: github.com/prometheus/client_golang - version: 08fd2e12372a66e68e30523c7642e0cbc3e4fbde - subpackages: - - prometheus -- package: github.com/prometheus/common - version: 49fee292b27bfff7f354ee0f64e1bc4850462edf -- package: github.com/prometheus/client_model - version: 6f3806018612930941127f2a7c6c453ba2c527d2 -- package: github.com/prometheus/procfs - version: a1dba9ce8baed984a2495b658c82687f8157b98f -- package: github.com/matttproud/golang_protobuf_extensions - version: c12348ce28de40eed0136aa2b644d0ee0650e56c -- package: github.com/eapache/channels - version: v1.1.0 -- package: golang.org/x/sys - version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 -- package: golang.org/x/net - version: c8c74377599bd978aee1cf3b9b63a8634051cec2 - subpackages: - - http2 - - context - - websocket -- package: github.com/docker/distribution - version: b38e5838b7b2f2ad48e06ec4b500011976080621 -- package: github.com/opencontainers/go-digest - version: a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb -- package: github.com/opencontainers/image-spec - version: f03dbe35d449c54915d235f1a3cf8f585a24babe - subpackages: - - specs-go - - specs-go/v1 -- package: github.com/docker/libtrust - version: 9cbd2a1374f46905c68a4eb3694a130610adc62a -- package: github.com/aws/aws-sdk-go - version: v1.6.18 - subpackages: - - aws - - aws/credentials - - aws/defaults - - aws/ec2metadata - - aws/endpoints - - aws/request - - aws/session - - service/dynamodb - - service/dynamodb/dynamodbiface - - service/dynamodbattribute - - service/ec2 - - service/ecs -- package: cloud.google.com/go - version: v0.7.0 - subpackages: - - compute/metadata -- package: github.com/gogo/protobuf - version: v0.3 - subpackages: - - proto -- package: github.com/golang/protobuf - version: 4bd1920723d7b7c925de087aa32e2187708897f7 -- package: github.com/rancher/go-rancher - version: 52e2f489534007ae843065468c5a1920d542afa4 -- package: golang.org/x/oauth2 - version: 7fdf09982454086d5570c7db3e11f360194830ca - subpackages: - - google -- package: golang.org/x/time - version: 8be79e1e0910c292df4e79c241bb7e8f7e725959 -- package: github.com/rancher/go-rancher-metadata - version: d2103caca5873119ff423d29cba09b4d03cd69b8 -- package: github.com/googleapis/gax-go - version: 9af46dd5a1713e8b5cd71106287eba3cefdde50b -- package: google.golang.org/grpc - version: v1.5.2 -- package: github.com/unrolled/secure - version: 824e85271811af89640ea25620c67f6c2eed987e -- package: 
github.com/Nvveen/Gotty - version: 6018b68f96b839edfbe3fb48668853f5dbad88a3 - repo: https://github.com/ijc25/Gotty.git - vcs: git -- package: github.com/spf13/pflag - version: cb88ea77998c3f024757528e3305022ab50b43be -- package: github.com/stretchr/testify - version: 4d4bfba8f1d1027c4fdbe371823030df51419987 - subpackages: - - assert - - mock - - require -- package: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew -- package: github.com/Masterminds/sprig - version: e039e20e500c2c025d9145be375e27cf42a94174 -- package: github.com/armon/go-proxyproto - version: 48572f11356f1843b694f21a290d4f1006bc5e47 -- package: github.com/mitchellh/copystructure -- package: github.com/mitchellh/hashstructure -testImport: -- package: github.com/stvp/go-udp-testing -- package: github.com/docker/libcompose - version: 1b708aac26a4fc6f9bff31728a8e3a252ef57dbd -- package: github.com/go-check/check - version: fork-containous - repo: https://github.com/containous/check.git - vcs: git -- package: github.com/libkermit/compose - version: 4a33a16f1446ba205c4da7b09105d5bdc293b432 - subpackages: - - check -- package: github.com/libkermit/docker - version: ddede409294e8c5ae66d68ac09edb6b27e8f3e4a -- package: github.com/libkermit/docker-check - version: e0695005d6819191cf8969b479c94c40c8d22aa4 -- package: github.com/mattn/go-shellwords -- package: github.com/vdemeester/shakers -- package: github.com/docker/cli - version: d95fd2f38cfc23e077530c6181330727d561b6a0 diff --git a/script/glide.sh b/script/glide.sh deleted file mode 100755 index 649125b12..000000000 --- a/script/glide.sh +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env bash -set -o errexit -set -o pipefail -set -o nounset - -#### -### Helper script for glide[-vc] to handle specifics for the Traefik repo. -## - -GLIDE_ARGS=() -GLIDE_VC_ARGS=( - '--use-lock-file' # `glide list` seems to miss test dependencies, e.g., github.com/mattn/go-shellwords - '--only-code' - '--no-tests' -) - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -readonly SCRIPT_DIR -readonly GLIDE_DIR="${SCRIPT_DIR}/.." - -usage() { - echo "usage: $(basename "$0") install | update | get | trim -install: Install all dependencies and trim the vendor folder afterwards (alternative command: i). -update: Update all dependencies and trim the vendor folder afterwards (alternative command: up). -get: Add a dependency and trim the vendor folder afterwards. -trim: Trim the vendor folder only, do not install or update dependencies. - -The current working directory must contain a glide.yaml file." >&2 -} - -GLIDE_ARGS+=('--strip-vendor') - -if ! type glide > /dev/null 2>&1; then - echo "glide not found in PATH." >&2 - exit 1 -fi - -if ! type glide-vc > /dev/null 2>&1; then - echo "glide-vc not found in PATH." >&2 - exit 1 -fi - -if [[ ! 
-e "${GLIDE_DIR}/glide.yaml" ]]; then - echo "no glide.yaml file found in the current working directory" >&2 - exit 1 -fi - -if [[ $# -lt 1 ]]; then - echo "missing command" >&2 - usage - exit 1 -fi - -readonly glide_command="$1" -shift - -skip_glide_command= -case "${glide_command}" in - 'install' | 'i') - if [[ $# -ne 0 ]]; then - echo "surplus parameters given" >&2 - usage - exit 1 - fi - ;; - - 'update' | 'up') - if [[ $# -ne 0 ]]; then - echo "surplus parameters given" >&2 - usage - exit 1 - fi - ;; - - 'get') - if [[ $# -ne 1 ]]; then - echo 'insufficient/surplus arguments given for "get" command' >&2 - usage - exit 1 - fi - GLIDE_ARGS+=("$1") - shift - ;; - - 'trim') - if [[ $# -ne 0 ]]; then - echo "surplus parameters given" >&2 - usage - exit 1 - fi - skip_glide_command=yes - ;; - - *) - echo "unknown command: ${glide_command}" >&2 - usage - exit 1 -esac -readonly skip_glide_command - -if [[ -z "${skip_glide_command}" ]]; then - # Use parameter substitution to account for an empty glide arguments array - # that would otherwise lead to an "unbound variable" error due to the nounset - # option. - GLIDE_ARGS=("${GLIDE_ARGS+"${GLIDE_ARGS[@]}"}") - echo "running: glide ${glide_command} ${GLIDE_ARGS[*]}" - glide ${glide_command} ${GLIDE_ARGS[*]} -fi - -echo "trimming vendor folder using: glide-vc ${GLIDE_VC_ARGS[*]}" -glide-vc ${GLIDE_VC_ARGS[*]} diff --git a/script/prune-dep.sh b/script/prune-dep.sh new file mode 100755 index 000000000..27428ba62 --- /dev/null +++ b/script/prune-dep.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o nounset + +echo "Prune dependencies" + +dep prune + +find vendor -name '*_test.go' -exec rm {} \; + +find vendor -type f \( ! -iname 'licen[cs]e*' \ + -a ! -iname '*notice*' \ + -a ! -iname '*patent*' \ + -a ! -iname '*copying*' \ + -a ! -iname '*unlicense*' \ + -a ! -iname '*copyright*' \ + -a ! -iname '*copyleft*' \ + -a ! -iname '*legal*' \ + -a ! -iname 'disclaimer*' \ + -a ! -iname 'third-party*' \ + -a ! -iname 'thirdparty*' \ + -a ! -iname '*.go' \ + -a ! -iname '*.c' \ + -a ! -iname '*.S' \ + -a ! -iname '*.cc' \ + -a ! -iname '*.cpp' \ + -a ! -iname '*.cxx' \ + -a ! -iname '*.h' \ + -a ! -iname '*.hh' \ + -a ! -iname '*.hpp' \ + -a ! -iname '*.hxx' \ + -a ! -iname '*.s' \) -exec rm -f {} + diff --git a/script/validate-glide b/script/validate-glide deleted file mode 100755 index d60629e56..000000000 --- a/script/validate-glide +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -source "$(dirname "$BASH_SOURCE")/.validate" - -if grep -q "$(glide-hash)" glide.lock; then - echo 'Congratulations! glide.lock is unchanged.' -else - { - echo "Error: glide.lock has been manually changed. Don't do this. Use script/glide.sh up instead." 
- echo - } >&2 - false -fi diff --git a/script/validate-vendor b/script/validate-vendor index 9bc39b00b..55c687275 100755 --- a/script/validate-vendor +++ b/script/validate-vendor @@ -3,27 +3,28 @@ set -o errexit set -o pipefail set -o nounset -SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"; export SCRIPTDIR -source "${SCRIPTDIR}/.validate" +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"; export SCRIPT_DIR +source "${SCRIPT_DIR}/.validate" vendor_dir="./vendor/" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- ${vendor_dir} || true) ) if [[ ${#files[@]} -gt 0 ]]; then - # We run glide install to and see if we have a diff afterwards + # We run dep install to and see if we have a diff afterwards echo "checking ${vendor_dir} for unintentional changes..." - ( - "${SCRIPTDIR}/glide.sh" install - ) + + dep ensure -v + (${SCRIPT_DIR}/prune-dep.sh) + # Let see if the working directory is clean diffs="$(git status --porcelain -- ${vendor_dir} 2>/dev/null)" if [[ "$diffs" ]]; then { - echo "The result of 'glide install' for vendor directory '${vendor_dir}' differs" + echo "The result of 'dep ensure' for vendor directory '${vendor_dir}' differs" echo echo "$diffs" echo - echo 'Please vendor your package(s) with script/glide.sh.' + echo 'Please vendor your package(s) with dep.' echo } >&2 exit 2 diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING index 5a8e33254..01b574320 100644 --- a/vendor/github.com/BurntSushi/toml/COPYING +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -1,14 +1,21 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 +The MIT License (MIT) - Copyright (C) 2004 Sam Hocevar +Copyright (c) 2013 TOML authors - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 6dee7fc79..e0a742a88 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -775,7 +775,7 @@ func lexDatetime(lx *lexer) stateFn { return lexDatetime } switch r { - case '-', 'T', ':', '.', 'Z': + case '-', 'T', ':', '.', 'Z', '+': return lexDatetime } diff --git a/vendor/github.com/aws/aws-sdk-go/service/generate.go b/vendor/github.com/aws/aws-sdk-go/service/generate.go new file mode 100644 index 000000000..3ffc9fcc5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/generate.go @@ -0,0 +1,5 @@ +// Package service contains automatically generated AWS clients. +package service + +//go:generate go run -tags codegen ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json +//go:generate gofmt -s -w ../service diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE deleted file mode 100644 index 004e77fe5..000000000 --- a/vendor/github.com/boltdb/bolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go deleted file mode 100644 index 820d533c1..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_386.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go deleted file mode 100644 index 98fafdb47..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_amd64.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go deleted file mode 100644 index 7e5cb4b94..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_arm.go +++ /dev/null @@ -1,28 +0,0 @@ -package bolt - -import "unsafe" - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned bool - -func init() { - // Simple check to see whether this arch handles unaligned load/stores - // correctly. - - // ARM9 and older devices require load/stores to be from/to aligned - // addresses. If not, the lower 2 bits are cleared and that address is - // read in a jumbled up order. - - // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html - - raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} - val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) - - brokenUnaligned = val != 0x11222211 -} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go deleted file mode 100644 index b26d84f91..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build arm64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go deleted file mode 100644 index 2b6766614..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go deleted file mode 100644 index 7058c3d73..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package bolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go deleted file mode 100644 index 645ddc3ed..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go deleted file mode 100644 index 9331d9771..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build ppc64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go deleted file mode 100644 index 8c143bc5d..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build ppc64le - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go deleted file mode 100644 index d7c39af92..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build s390x - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go deleted file mode 100644 index cad62dda1..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_unix.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build !windows,!plan9,!solaris - -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. 
- db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go deleted file mode 100644 index 307bf2b3e..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go +++ /dev/null @@ -1,90 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. 
- err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go deleted file mode 100644 index b00fb0720..000000000 --- a/vendor/github.com/boltdb/bolt/bolt_windows.go +++ /dev/null @@ -1,144 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - lockExt = ".lock" - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - flagLockExclusive = 2 - flagLockFailImmediately = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r == 0 { - return err - } - return nil -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r == 0 { - return err - } - return nil -} - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - // Create a separate lock file on windows because a process - // cannot share an exclusive lock on the same file. This is - // needed during Tx.WriteTo(). - f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) - if err != nil { - return err - } - db.lockfile = f - - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) - if err == nil { - return nil - } else if err != errLockViolation { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) - db.lockfile.Close() - os.Remove(db.path + lockExt) - return err -} - -// mmap memory maps a DB's data file. -// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. 
- sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go deleted file mode 100644 index f50442523..000000000 --- a/vendor/github.com/boltdb/bolt/boltsync_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go deleted file mode 100644 index 0c5bf2746..000000000 --- a/vendor/github.com/boltdb/bolt/bucket.go +++ /dev/null @@ -1,777 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = (1 << 31) - 2 -) - -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. 
-func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.CursorCount++ - - // Allocate and return a cursor. - return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // If unaligned load/stores are broken on this arch and value is - // unaligned simply clone to an aligned byte array. - unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 - - if unaligned { - value = cloneBytes(value) - } - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } - return nil, ErrIncompatibleValue - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. 
- key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) - - // Since subbuckets are not allowed on inline buckets, we need to - // dereference the inline page, if it exists. This will cause the bucket - // to be treated as a regular, non-inline bucket for the rest of the tx. - b.page = nil - - return b.Bucket(key), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err - } - return child, nil -} - -// DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue - } - - // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEach(func(k, v []byte) error { - if v == nil { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } - } - return nil - }) - if err != nil { - return err - } - - // Remove cached copy. - delete(b.buckets, string(key)) - - // Release all bucket pages to freelist. - child.nodes = nil - child.rootNode = nil - child.free() - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (b *Bucket) Get(key []byte) []byte { - k, v, flags := b.Cursor().seek(key) - - // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { - return nil - } - - // If our target node isn't the same key as what's passed in then return nil. - if !bytes.Equal(key, k) { - return nil - } - return v -} - -// Put sets the value for a key in the bucket. -// If the key exist then its previous value will be overwritten. -// Supplied value must remain valid for the life of the transaction. -// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } else if len(key) == 0 { - return ErrKeyRequired - } else if len(key) > MaxKeySize { - return ErrKeyTooLarge - } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Insert into node. 
- key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) - - return nil -} - -// Delete removes a key from the bucket. -// If the key does not exist then nothing is done and a nil error is returned. -// Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - _, _, flags := c.seek(key) - - // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } - -// SetSequence updates the sequence number for the bucket. -func (b *Bucket) SetSequence(v uint64) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence = v - return nil -} - -// NextSequence returns an autoincrementing integer for the bucket. -func (b *Bucket) NextSequence() (uint64, error) { - if b.tx.db == nil { - return 0, ErrTxClosed - } else if !b.Writable() { - return 0, ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil -} - -// ForEach executes a function for each key/value pair in a bucket. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. The provided function must not modify -// the bucket; this will result in undefined behavior. -func (b *Bucket) ForEach(fn func(k, v []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -// Stat returns stats on a bucket. -func (b *Bucket) Stats() BucketStats { - var s, subStats BucketStats - pageSize := b.tx.db.pageSize - s.BucketN += 1 - if b.root == 0 { - s.InlineBucketN += 1 - } - b.forEachPage(func(p *page, depth int) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) - - // used totals the used bytes for the page - used := pageHeaderSize - - if p.count != 0 { - // If page has any elements, add all element headers. - used += leafPageElementSize * int(p.count-1) - - // Add all element key, value sizes. - // The computation takes advantage of the fact that the position - // of the last element's key/value equals to the total of the sizes - // of all previous elements' keys and values. - // It also includes the last element's header. 
- lastElement := p.leafPageElement(p.count - 1) - used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += used - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += used - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += int(lastElement.pos + lastElement.ksize) - s.BranchInuse += used - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = (depth + 1) - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int)) { - // If we have an inline page then just use that. - if b.page != nil { - fn(b.page, 0) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. 
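// Illustrative sketch (not part of this patch) of the Stats walkthrough above:
// BucketStats reports page counts and byte usage for a bucket and, recursively,
// its sub-buckets. The bucket name is invented for the example.
package example

import (
	"fmt"

	"github.com/boltdb/bolt"
)

func reportUsage(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil
		}
		s := b.Stats()
		// LeafInuse counts bytes actually holding keys/values; LeafAlloc counts
		// whole pages, so the ratio is a rough fill factor for leaf pages.
		fmt.Printf("keys=%d depth=%d leaf-inuse=%d/%d buckets=%d (inline %d)\n",
			s.KeyN, s.Depth, s.LeafInuse, s.LeafAlloc, s.BucketN, s.InlineBucketN)
		return nil
	})
}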
- value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. - b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. 
-func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. - BranchAlloc int // bytes allocated for physical branch pages - BranchInuse int // bytes actually used for branch data - LeafAlloc int // bytes allocated for physical leaf pages - LeafInuse int // bytes actually used for leaf data - - // Bucket statistics - BucketN int // total number of buckets including the top bucket - InlineBucketN int // total number on inlined buckets - InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) -} - -func (s *BucketStats) Add(other BucketStats) { - s.BranchPageN += other.BranchPageN - s.BranchOverflowN += other.BranchOverflowN - s.LeafPageN += other.LeafPageN - s.LeafOverflowN += other.LeafOverflowN - s.KeyN += other.KeyN - if s.Depth < other.Depth { - s.Depth = other.Depth - } - s.BranchAlloc += other.BranchAlloc - s.BranchInuse += other.BranchInuse - s.LeafAlloc += other.LeafAlloc - s.LeafInuse += other.LeafInuse - - s.BucketN += other.BucketN - s.InlineBucketN += other.InlineBucketN - s.InlineBucketInuse += other.InlineBucketInuse -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go deleted file mode 100644 index 1be9f35e3..000000000 --- a/vendor/github.com/boltdb/bolt/cursor.go +++ /dev/null @@ -1,400 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. 
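// Illustrative sketch (not part of this patch) of the Bucket API removed from vendor
// above: creating a bucket, writing and reading a key, and iterating with ForEach inside
// an Update transaction. The path, bucket, and key names are invented; the same calls
// exist in the coreos/bbolt fork that stays vendored.
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			return err
		}
		fmt.Printf("foo=%s\n", b.Get([]byte("foo")))
		// ForEach visits every key/value pair; nested buckets appear with a nil value.
		return b.ForEach(func(k, v []byte) error {
			fmt.Printf("%s=%s\n", k, v)
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}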
-// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. 
-func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. 
- c.stack = c.stack[:i+1] - c.first() - - // If this is an empty page then restart and move back up the stack. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. 
- if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go deleted file mode 100644 index f352ff14f..000000000 --- a/vendor/github.com/boltdb/bolt/db.go +++ /dev/null @@ -1,1039 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "hash/fnv" - "log" - "os" - "runtime" - "runtime/debug" - "strings" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. 
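// Illustrative sketch (not part of this patch) of the Cursor API removed above:
// ordered iteration and a Seek-based prefix scan inside a read-only transaction.
// Bucket and key names are invented for the example.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func scan(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil // bucket has not been created yet
		}
		c := b.Cursor()

		// Walk every key in sorted order.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}

		// Seek positions the cursor at the first key >= the prefix, so a prefix
		// scan is Seek plus Next until the prefix no longer matches.
		prefix := []byte("user:")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	})
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := scan(db); err != nil {
		log.Fatal(err)
	}
}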
- // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // If you want to read the entire database fast, you can set MmapFlag to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. - AllocSize int - - path string - file *os.File - lockfile *os.File // windows only - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - freelist *freelist - stats Stats - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - // Open data file and separate sync handler for metadata writes. - db.path = path - var err error - if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. 
- // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - return nil, err - } - } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { - db.pageSize = int(m.pageSize) - } - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) - - // Mark the database as opened and return. - return db, nil -} - -// mmap opens the underlying memory-mapped file and initializes the meta references. -// minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } - - // Ensure the size is at least the minimum size. - var size = int(info.Size()) - if size < minsz { - size = minsz - } - size, err = db.mmapSize(size) - if err != nil { - return err - } - - // Dereference all mmap references before unmapping. - if db.rwtx != nil { - db.rwtx.root.dereference() - } - - // Unmap existing data before continuing. - if err := db.munmap(); err != nil { - return err - } - - // Memory-map the data file as a byte slice. - if err := mmap(db, size); err != nil { - return err - } - - // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() - - // Validate the meta pages. We only return an error if both meta pages fail - // validation, since meta0 failing validation means that it wasn't saved - // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() - if err0 != nil && err1 != nil { - return err0 - } - - return nil -} - -// munmap unmaps the data file from memory. -func (db *DB) munmap() error { - if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) - } - return nil -} - -// mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 32KB and doubles until it reaches 1GB. -// Returns an error if the new mmap size is greater than the max allowed. 
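// Illustrative sketch (not part of this patch) of Open as implemented above: the Timeout
// option bounds how long the flock retry loop waits for the file lock, and ReadOnly takes
// a shared lock so several processes can read the same file concurrently. The path is
// invented for the example.
package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

// openForWrite takes the exclusive file lock; Timeout bounds how long the
// flock retry loop waits before returning ErrTimeout.
func openForWrite(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0600, &bolt.Options{Timeout: time.Second})
}

// openReadOnly takes a shared lock instead, so multiple readers can hold the
// file open at once, but Update and Begin(true) return ErrDatabaseReadOnly.
func openReadOnly(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0400, &bolt.Options{ReadOnly: true, Timeout: time.Second})
}

func main() {
	db, err := openForWrite("example.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}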
-func (db *DB) mmapSize(size int) (int, error) { - // Double the size from 32KB until 1GB. - for i := uint(15); i <= 30; i++ { - if size <= 1<<i { - return 1 << i, nil - } - } - - // Verify the requested size is not above the maximum allowed. - if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") - } - - // If larger than 1GB then grow by 1GB at a time. - sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder - } - - // Ensure that the mmap size is a multiple of the page size. - // This should always be true since we're incrementing in MBs. - pageSize := int64(db.pageSize) - if (sz % pageSize) != 0 { - sz = ((sz / pageSize) + 1) * pageSize - } - - // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize - } - - return int(sz), nil -} - -// init creates a new database file and initializes its meta pages. -func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - - // Create two meta pages on a buffer. - buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf[:], pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf[:], pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - - return nil -} - -// Close releases all database resources. -// All transactions must be closed before closing the database. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.RLock() - defer db.mmaplock.RUnlock() - - return db.close() -} - -func (db *DB) close() error { - if !db.opened { - return nil - } - - db.opened = false - - db.freelist = nil - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - if err := funlock(db); err != nil { - log.Printf("bolt.Close(): funlock error: %s", err) - } - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - db.path = "" - return nil -} - -// Begin starts a new transaction. -// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time. Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be dependent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open.
-// -// If a long running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } - } - if minid > 0 { - db.freelist.release(minid - 1) - } - - return t, nil -} - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. 
-func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - if err := t.Rollback(); err != nil { - return err - } - - return nil -} - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. 
it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} - -// Sync executes fdatasync() against the database file handle. -// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. -func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - // We have to return the meta with the highest txid which doesn't fail - // validation. Otherwise, we can cause errors when in fact the database is - // in a consistent state. metaA is the one with the higher txid. - metaA := db.meta0 - metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { - metaA = db.meta1 - metaB = db.meta0 - } - - // Use higher meta page if valid. Otherwise fallback to previous, if valid. - if err := metaA.validate(); err == nil { - return metaA - } else if err := metaB.validate(); err == nil { - return metaB - } - - // This should never be reached, because both meta1 and meta0 were validated - // on mmap() and we do fsync() on every write. - panic("bolt.DB.meta(): invalid meta pages") -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { - // Allocate a temporary buffer for the page. - var buf []byte - if count == 1 { - buf = db.pagePool.Get().([]byte) - } else { - buf = make([]byte, count*db.pageSize) - } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. 
- if p.id = db.freelist.allocate(count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -// grow grows the size of the database to the given sz. -func (db *DB) grow(sz int) error { - // Ignore if the new size is less than available file size. - if sz <= db.filesz { - return nil - } - - // If the data is smaller than the alloc size then only allocate what's needed. - // Once it goes over the allocation size then allocate in chunks. - if db.datasz < db.AllocSize { - sz = db.datasz - } else { - sz += db.AllocSize - } - - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if runtime.GOOS != "windows" { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - - db.filesz = sz - return nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool - - // Sets the DB.MmapFlags flag before memory mapping the file. - MmapFlags int - - // InitialMmapSize is the initial mmap size of the database - // in bytes. Read transactions won't block write transaction - // if the InitialMmapSize is large enough to hold database mmap - // size. (See DB.Begin for more information) - // - // If <=0, the initial map size is 0. - // If initialMmapSize is smaller than the previous database size, - // it takes no effect. - InitialMmapSize int -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. 
-func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go deleted file mode 100644 index cc937845d..000000000 --- a/vendor/github.com/boltdb/bolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Package bolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. 
- -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bolt diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go deleted file mode 100644 index a3620a3eb..000000000 --- a/vendor/github.com/boltdb/bolt/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package bolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. 
- ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go deleted file mode 100644 index aba48f58c..000000000 --- a/vendor/github.com/boltdb/bolt/freelist.go +++ /dev/null @@ -1,252 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// copyall copies into dst a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, list := range f.pending { - m = append(m, list...) - } - sort.Sort(m) - mergepgids(dst, f.ids, m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. 
-// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. - ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - } - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - lenids := f.count() - if lenids == 0 { - p.count = uint16(lenids) - } else if lenids < 0xFFFF { - p.count = uint16(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. 
- var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. - f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - f.cache = make(map[pgid]bool, len(f.ids)) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go deleted file mode 100644 index 159318b22..000000000 --- a/vendor/github.com/boltdb/bolt/node.go +++ /dev/null @@ -1,604 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - } - return sz -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v int) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. 
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. 
- // - // See: https://github.com/boltdb/bolt/pull/335 - klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } - - // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. 
- sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) 
- n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
-type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go deleted file mode 100644 index cde403ae8..000000000 --- a/vendor/github.com/boltdb/bolt/page.go +++ /dev/null @@ -1,197 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) - -const minKeysPerPage = 2 - -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] -} - -// value returns a byte slice of the node value. 
-func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go deleted file mode 100644 index 6700308a2..000000000 --- a/vendor/github.com/boltdb/bolt/tx.go +++ /dev/null @@ -1,684 +0,0 @@ -package bolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. 
- tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. 
- var startTime = time.Now() - tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) - } - - // spill data onto dirty pages. - startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.SpillTime += time.Since(startTime) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. - if tx.db.StrictMode { - ch := tx.Check() - var errs []string - for { - err, ok := <-ch - if !ok { - break - } - errs = append(errs, err.Error()) - } - if len(errs) > 0 { - panic("check fail: " + strings.Join(errs, "\n")) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.WriteTime += time.Since(startTime) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. -func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.rollback() - return nil -} - -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove transaction ref & writer lock. - tx.db.rwtx = nil - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - - // Clear all references. - tx.db = nil - tx.meta = nil - tx.root = Bucket{tx: tx} - tx.pages = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() instead. -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. 
-// If err == nil then exactly tx.Size() bytes will be written into the writer. -func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { _ = f.Close() }() - - // Generate a meta page. We use the same page data for both meta pages. - buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta - - // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() - nn, err := w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 0 copy: %s", err) - } - - // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() - nn, err = w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 1 copy: %s", err) - } - - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { - return n, fmt.Errorf("seek: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - return n, err - } - - return n, f.Close() -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. 
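(The WriteTo/CopyFile helpers above are the usual way to take a hot backup: a read transaction pins a consistent view while the data file is streamed out. A minimal sketch, not part of this patch; the destination path is whatever the caller chooses.)

package main

import "github.com/boltdb/bolt"

// backupDB streams a consistent copy of an open database to dst while other
// readers and writers keep working; the read transaction pins the snapshot.
func backupDB(db *bolt.DB, dst string) error {
	return db.View(func(tx *bolt.Tx) error {
		// CopyFile copies exactly tx.Size() bytes as of this transaction.
		return tx.CopyFile(dst, 0600)
	})
}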
- b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount++ - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - size := (int(p.overflow) + 1) * tx.db.pageSize - offset := int64(p.id) * int64(tx.db.pageSize) - - // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) - for { - // Limit our write to our max allocation size. - sz := size - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - - // Write chunk to disk. - buf := ptr[:sz] - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - size -= sz - if size == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { - continue - } - - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) - } - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. 
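(Check(), shown above, reports consistency errors on a channel that is closed when the scan completes; the StrictMode branch of Commit drains it the same way. A small sketch of consuming it from a read-only transaction, purely for illustration.)

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

// verify drains bolt's consistency checker; the channel is closed once the
// freelist and every bucket have been scanned.
func verify(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		for err := range tx.Check() {
			log.Printf("consistency error: %v", err)
		}
		return nil
	})
}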
- tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { - p := tx.page(pgid) - - // Execute function. - fn(p, depth) - - // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPage(elem.pgid, depth+1, fn) - } - } -} - -// Page returns page information for a given page number. -// This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { - if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { - return nil, nil - } - - // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ - ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), - } - - // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { - info.Type = "free" - } else { - info.Type = p.typ() - } - - return info, nil -} - -// TxStats represents statistics about the actions performed by the transaction. -type TxStats struct { - // Page statistics. - PageCount int // number of page allocations - PageAlloc int // total bytes allocated - - // Cursor statistics. - CursorCount int // number of cursors created - - // Node statistics - NodeCount int // number of node allocations - NodeDeref int // number of node dereferences - - // Rebalance statistics. - Rebalance int // number of node rebalances - RebalanceTime time.Duration // total time spent rebalancing - - // Split/Spill statistics. - Split int // number of nodes split - Spill int // number of nodes spilled - SpillTime time.Duration // total time spent spilling - - // Write statistics. - Write int // number of writes performed - WriteTime time.Duration // total time spent writing to disk -} - -func (s *TxStats) add(other *TxStats) { - s.PageCount += other.PageCount - s.PageAlloc += other.PageAlloc - s.CursorCount += other.CursorCount - s.NodeCount += other.NodeCount - s.NodeDeref += other.NodeDeref - s.Rebalance += other.Rebalance - s.RebalanceTime += other.RebalanceTime - s.Split += other.Split - s.Spill += other.Spill - s.SpillTime += other.SpillTime - s.Write += other.Write - s.WriteTime += other.WriteTime -} - -// Sub calculates and returns the difference between two sets of transaction stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. 
-func (s *TxStats) Sub(other *TxStats) TxStats { - var diff TxStats - diff.PageCount = s.PageCount - other.PageCount - diff.PageAlloc = s.PageAlloc - other.PageAlloc - diff.CursorCount = s.CursorCount - other.CursorCount - diff.NodeCount = s.NodeCount - other.NodeCount - diff.NodeDeref = s.NodeDeref - other.NodeDeref - diff.Rebalance = s.Rebalance - other.Rebalance - diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime - diff.Split = s.Split - other.Split - diff.Spill = s.Spill - other.Spill - diff.SpillTime = s.SpillTime - other.SpillTime - diff.Write = s.Write - other.Write - diff.WriteTime = s.WriteTime - other.WriteTime - return diff -} diff --git a/vendor/github.com/cenk/backoff/backoff.go b/vendor/github.com/cenk/backoff/backoff.go index 2102c5f2d..3676ee405 100644 --- a/vendor/github.com/cenk/backoff/backoff.go +++ b/vendor/github.com/cenk/backoff/backoff.go @@ -15,7 +15,7 @@ import "time" // BackOff is a backoff policy for retrying an operation. type BackOff interface { // NextBackOff returns the duration to wait before retrying the operation, - // or backoff.Stop to indicate that no more retries should be made. + // or backoff. Stop to indicate that no more retries should be made. // // Example usage: // diff --git a/vendor/github.com/cenk/backoff/exponential.go b/vendor/github.com/cenk/backoff/exponential.go index cc2a164f2..d9de15a17 100644 --- a/vendor/github.com/cenk/backoff/exponential.go +++ b/vendor/github.com/cenk/backoff/exponential.go @@ -63,6 +63,7 @@ type ExponentialBackOff struct { currentInterval time.Duration startTime time.Time + random *rand.Rand } // Clock is an interface that returns current time for BackOff. @@ -88,6 +89,7 @@ func NewExponentialBackOff() *ExponentialBackOff { MaxInterval: DefaultMaxInterval, MaxElapsedTime: DefaultMaxElapsedTime, Clock: SystemClock, + random: rand.New(rand.NewSource(time.Now().UnixNano())), } b.Reset() return b @@ -116,13 +118,18 @@ func (b *ExponentialBackOff) NextBackOff() time.Duration { return Stop } defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + if b.random == nil { + b.random = rand.New(rand.NewSource(time.Now().UnixNano())) + } + return getRandomValueFromInterval(b.RandomizationFactor, b.random.Float64(), b.currentInterval) } // GetElapsedTime returns the elapsed time since an ExponentialBackOff instance // is created and is reset when Reset() is called. // -// The elapsed time is computed using time.Now().UnixNano(). +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. func (b *ExponentialBackOff) GetElapsedTime() time.Duration { return b.Clock.Now().Sub(b.startTime) } diff --git a/vendor/github.com/cenk/backoff/ticker.go b/vendor/github.com/cenk/backoff/ticker.go index 49a99718d..e742512fd 100644 --- a/vendor/github.com/cenk/backoff/ticker.go +++ b/vendor/github.com/cenk/backoff/ticker.go @@ -18,9 +18,12 @@ type Ticker struct { stopOnce sync.Once } -// NewTicker returns a new Ticker containing a channel that will send the time at times -// specified by the BackOff argument. Ticker is guaranteed to tick at least once. -// The channel is closed when Stop method is called or BackOff stops. +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. 
It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. func NewTicker(b BackOff) *Ticker { c := make(chan time.Time) t := &Ticker{ @@ -29,6 +32,7 @@ func NewTicker(b BackOff) *Ticker { b: ensureContext(b), stop: make(chan struct{}), } + t.b.Reset() go t.run() runtime.SetFinalizer(t, (*Ticker).Stop) return t @@ -42,7 +46,6 @@ func (t *Ticker) Stop() { func (t *Ticker) run() { c := t.c defer close(c) - t.b.Reset() // Ticker is guaranteed to tick at least once. afterC := t.send(time.Now()) diff --git a/vendor/github.com/cenk/backoff/tries.go b/vendor/github.com/cenk/backoff/tries.go new file mode 100644 index 000000000..cfeefd9b7 --- /dev/null +++ b/vendor/github.com/cenk/backoff/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/codahale/hdrhistogram/hdr.go b/vendor/github.com/codahale/hdrhistogram/hdr.go index 658f68a61..c97842926 100644 --- a/vendor/github.com/codahale/hdrhistogram/hdr.go +++ b/vendor/github.com/codahale/hdrhistogram/hdr.go @@ -48,12 +48,8 @@ func New(minValue, maxValue int64, sigfigs int) *Histogram { panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs)) } - largestValueWithSingleUnitResolution := 2 * power(10, int64(sigfigs)) - - // we need to shove these down to float32 or the math is wrong - a := float32(math.Log(float64(largestValueWithSingleUnitResolution))) - b := float32(math.Log(2)) - subBucketCountMagnitude := int32(math.Ceil(float64(a / b))) + largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs) + subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution)))) subBucketHalfCountMagnitude := subBucketCountMagnitude if subBucketHalfCountMagnitude < 1 { @@ -61,7 +57,7 @@ func New(minValue, maxValue int64, sigfigs int) *Histogram { } subBucketHalfCountMagnitude-- - unitMagnitude := int32(math.Floor(math.Log(float64(minValue)) / math.Log(2))) + unitMagnitude := int32(math.Floor(math.Log2(float64(minValue)))) if unitMagnitude < 0 { unitMagnitude = 0 } @@ -124,6 +120,11 @@ func (h *Histogram) Merge(from *Histogram) (dropped int64) { return } +// TotalCount returns total number of values recorded. +func (h *Histogram) TotalCount() int64 { + return h.totalCount +} + // Max returns the approximate maximum recorded value. func (h *Histogram) Max() int64 { var max int64 @@ -133,7 +134,7 @@ func (h *Histogram) Max() int64 { max = i.highestEquivalentValue } } - return h.lowestEquivalentValue(max) + return h.highestEquivalentValue(max) } // Min returns the approximate minimum recorded value. @@ -151,6 +152,9 @@ func (h *Histogram) Min() int64 { // Mean returns the approximate arithmetic mean of the recorded values. 
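(The new tries.go above adds WithMaxRetries as a wrapper around any BackOff. A hedged sketch of combining it with NewExponentialBackOff and the package's Retry helper — Retry lives elsewhere in the same package, not in these hunks; the operation and the limit of 5 are illustrative.)

package main

import (
	"errors"
	"log"

	"github.com/cenk/backoff"
)

func main() {
	// Stop after at most 5 attempts on top of the exponential policy.
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)

	attempt := 0
	op := func() error {
		attempt++
		// ... call the flaky operation here ...
		return errors.New("still failing") // placeholder failure
	}

	if err := backoff.Retry(op, policy); err != nil {
		log.Printf("gave up after %d attempts: %v", attempt, err)
	}
}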
func (h *Histogram) Mean() float64 { + if h.totalCount == 0 { + return 0 + } var total int64 i := h.iterator() for i.next() { @@ -163,6 +167,10 @@ func (h *Histogram) Mean() float64 { // StdDev returns the approximate standard deviation of the recorded values. func (h *Histogram) StdDev() float64 { + if h.totalCount == 0 { + return 0 + } + mean := h.Mean() geometricDevTotal := 0.0 @@ -267,6 +275,49 @@ func (h *Histogram) CumulativeDistribution() []Bracket { return result } +// SignificantFigures returns the significant figures used to create the +// histogram +func (h *Histogram) SignificantFigures() int64 { + return h.significantFigures +} + +// LowestTrackableValue returns the lower bound on values that will be added +// to the histogram +func (h *Histogram) LowestTrackableValue() int64 { + return h.lowestTrackableValue +} + +// HighestTrackableValue returns the upper bound on values that will be added +// to the histogram +func (h *Histogram) HighestTrackableValue() int64 { + return h.highestTrackableValue +} + +// Histogram bar for plotting +type Bar struct { + From, To, Count int64 +} + +// Pretty print as csv for easy plotting +func (b Bar) String() string { + return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count) +} + +// Distribution returns an ordered list of bars of the +// distribution of recorded values, counts can be normalized to a probability +func (h *Histogram) Distribution() (result []Bar) { + i := h.iterator() + for i.next() { + result = append(result, Bar{ + Count: i.countAtIdx, + From: h.lowestEquivalentValue(i.valueFromIdx), + To: i.highestEquivalentValue, + }) + } + + return result +} + // Equals returns true if the two Histograms are equivalent, false if not. func (h *Histogram) Equals(other *Histogram) bool { switch { @@ -300,11 +351,12 @@ func (h *Histogram) Export() *Snapshot { LowestTrackableValue: h.lowestTrackableValue, HighestTrackableValue: h.highestTrackableValue, SignificantFigures: h.significantFigures, - Counts: h.counts, + Counts: append([]int64(nil), h.counts...), // copy } } -// Import returns a new Histogram populated from the Snapshot data. +// Import returns a new Histogram populated from the Snapshot data (which the +// caller must stop accessing). 
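(For context on the hdrhistogram accessors added above — TotalCount, SignificantFigures, Distribution — and on Export now copying the counts, a small usage sketch; the bounds and recorded values are arbitrary, and this is not part of the patch.)

package main

import (
	"fmt"
	"log"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track values between 1 and 1,000,000 with 3 significant figures.
	h := hdrhistogram.New(1, 1000000, 3)

	for _, v := range []int64{10, 250, 250, 5000} {
		if err := h.RecordValue(v); err != nil {
			log.Fatal(err)
		}
	}

	fmt.Println("count:", h.TotalCount())
	fmt.Println("p50:  ", h.ValueAtQuantile(50))
	fmt.Println("mean: ", h.Mean())

	// Export copies the counts, so the snapshot stays stable even if the
	// original histogram keeps recording.
	snap := h.Export()
	h2 := hdrhistogram.Import(snap)
	fmt.Println("imported count:", h2.TotalCount())
}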
func Import(s *Snapshot) *Histogram { h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures)) h.counts = s.Counts @@ -478,7 +530,7 @@ func (p *pIterator) next() bool { currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount) if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile { p.percentile = p.percentileToIteratorTo - halfDistance := math.Pow(2, (math.Log(100.0/(100.0-(p.percentileToIteratorTo)))/math.Log(2))+1) + halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1)) percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance p.percentileToIteratorTo += 100.0 / percentileReportingTicks return true @@ -510,12 +562,3 @@ func bitLen(x int64) (n int64) { } return } - -func power(base, exp int64) (n int64) { - n = 1 - for exp > 0 { - n *= base - exp-- - } - return -} diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go index 78e71cbf2..266f15429 100644 --- a/vendor/github.com/coreos/bbolt/freelist.go +++ b/vendor/github.com/coreos/bbolt/freelist.go @@ -132,9 +132,9 @@ func (f *freelist) free(txid txid, p *page) { allocTxid, ok := f.allocs[p.id] if ok { delete(f.allocs, p.id) - } else if (p.flags & (freelistPageFlag | metaPageFlag)) != 0 { - // Safe to claim txid as allocating since these types are private to txid. - allocTxid = txid + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 } for id := p.id; id <= p.id+pgid(p.overflow); id++ { @@ -233,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool { // read initializes the freelist from a freelist page. func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. idx, count := 0, int(p.count) diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go new file mode 100644 index 000000000..72741a107 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package auth provides client role authentication for accessing keys in etcd. +package auth diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go new file mode 100644 index 000000000..214ae48c8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/jwt.go @@ -0,0 +1,137 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "crypto/rsa" + "io/ioutil" + + jwt "github.com/dgrijalva/jwt-go" + "golang.org/x/net/context" +) + +type tokenJWT struct { + signMethod string + signKey *rsa.PrivateKey + verifyKey *rsa.PublicKey +} + +func (t *tokenJWT) enable() {} +func (t *tokenJWT) disable() {} +func (t *tokenJWT) invalidateUser(string) {} +func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } + +func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + // rev isn't used in JWT, it is only used in simple token + var ( + username string + revision uint64 + ) + + parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + return t.verifyKey, nil + }) + + switch err.(type) { + case nil: + if !parsed.Valid { + plog.Warningf("invalid jwt token: %s", token) + return nil, false + } + + claims := parsed.Claims.(jwt.MapClaims) + + username = claims["username"].(string) + revision = uint64(claims["revision"].(float64)) + default: + plog.Warningf("failed to parse jwt token: %s", err) + return nil, false + } + + return &AuthInfo{Username: username, Revision: revision}, true +} + +func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { + // Future work: let a jwt token include permission information would be useful for + // permission checking in proxy side. 
+ tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), + jwt.MapClaims{ + "username": username, + "revision": revision, + }) + + token, err := tk.SignedString(t.signKey) + if err != nil { + plog.Debugf("failed to sign jwt token: %s", err) + return "", err + } + + plog.Debugf("jwt token: %s", token) + + return token, err +} + +func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { + for k, v := range opts { + switch k { + case "sign-method": + jwtSignMethod = v + case "pub-key": + jwtPubKeyPath = v + case "priv-key": + jwtPrivKeyPath = v + default: + plog.Errorf("unknown token specific option: %s", k) + return "", "", "", ErrInvalidAuthOpts + } + } + + return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil +} + +func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { + jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + t := &tokenJWT{} + + t.signMethod = jwtSignMethod + + verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) + if err != nil { + plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) + return nil, err + } + t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) + if err != nil { + plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) + return nil, err + } + + signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) + if err != nil { + plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) + return nil, err + } + t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) + if err != nil { + plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) + return nil, err + } + + return t, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go new file mode 100644 index 000000000..691b65ba3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go @@ -0,0 +1,133 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "github.com/coreos/etcd/auth/authpb" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/adt" +) + +func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { + user := getUser(tx, userName) + if user == nil { + plog.Errorf("invalid user name %s", userName) + return nil + } + + readPerms := &adt.IntervalTree{} + writePerms := &adt.IntervalTree{} + + for _, roleName := range user.Roles { + role := getRole(tx, roleName) + if role == nil { + continue + } + + for _, perm := range role.KeyPermission { + var ivl adt.Interval + var rangeEnd []byte + + if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { + rangeEnd = perm.RangeEnd + } + + if len(perm.RangeEnd) != 0 { + ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) + } else { + ivl = adt.NewBytesAffinePoint(perm.Key) + } + + switch perm.PermType { + case authpb.READWRITE: + readPerms.Insert(ivl, struct{}{}) + writePerms.Insert(ivl, struct{}{}) + + case authpb.READ: + readPerms.Insert(ivl, struct{}{}) + + case authpb.WRITE: + writePerms.Insert(ivl, struct{}{}) + } + } + } + + return &unifiedRangePermissions{ + readPerms: readPerms, + writePerms: writePerms, + } +} + +func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + rangeEnd = nil + } + + ivl := adt.NewBytesAffineInterval(key, rangeEnd) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Contains(ivl) + case authpb.WRITE: + return cachedPerms.writePerms.Contains(ivl) + default: + plog.Panicf("unknown auth type: %v", permtyp) + } + return false +} + +func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { + pt := adt.NewBytesAffinePoint(key) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Intersects(pt) + case authpb.WRITE: + return cachedPerms.writePerms.Intersects(pt) + default: + plog.Panicf("unknown auth type: %v", permtyp) + } + return false +} + +func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + // assumption: tx is Lock()ed + _, ok := as.rangePermCache[userName] + if !ok { + perms := getMergedPerms(tx, userName) + if perms == nil { + plog.Errorf("failed to create a unified permission of user %s", userName) + return false + } + as.rangePermCache[userName] = perms + } + + if len(rangeEnd) == 0 { + return checkKeyPoint(as.rangePermCache[userName], key, permtyp) + } + + return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) +} + +func (as *authStore) clearCachedPerm() { + as.rangePermCache = make(map[string]*unifiedRangePermissions) +} + +func (as *authStore) invalidateCachedPerm(userName string) { + delete(as.rangePermCache, userName) +} + +type unifiedRangePermissions struct { + readPerms *adt.IntervalTree + writePerms *adt.IntervalTree +} diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go new file mode 100644 index 000000000..94d92a115 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/simple_token.go @@ -0,0 +1,220 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +// CAUTION: This randum number based token mechanism is only for testing purpose. +// JWT based mechanism will be added in the near future. + +import ( + "crypto/rand" + "fmt" + "math/big" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +const ( + letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + defaultSimpleTokenLength = 16 +) + +// var for testing purposes +var ( + simpleTokenTTL = 5 * time.Minute + simpleTokenTTLResolution = 1 * time.Second +) + +type simpleTokenTTLKeeper struct { + tokens map[string]time.Time + donec chan struct{} + stopc chan struct{} + deleteTokenFunc func(string) + mu *sync.Mutex +} + +func (tm *simpleTokenTTLKeeper) stop() { + select { + case tm.stopc <- struct{}{}: + case <-tm.donec: + } + <-tm.donec +} + +func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) { + tm.tokens[token] = time.Now().Add(simpleTokenTTL) +} + +func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) { + if _, ok := tm.tokens[token]; ok { + tm.tokens[token] = time.Now().Add(simpleTokenTTL) + } +} + +func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) { + delete(tm.tokens, token) +} + +func (tm *simpleTokenTTLKeeper) run() { + tokenTicker := time.NewTicker(simpleTokenTTLResolution) + defer func() { + tokenTicker.Stop() + close(tm.donec) + }() + for { + select { + case <-tokenTicker.C: + nowtime := time.Now() + tm.mu.Lock() + for t, tokenendtime := range tm.tokens { + if nowtime.After(tokenendtime) { + tm.deleteTokenFunc(t) + delete(tm.tokens, t) + } + } + tm.mu.Unlock() + case <-tm.stopc: + return + } + } +} + +type tokenSimple struct { + indexWaiter func(uint64) <-chan struct{} + simpleTokenKeeper *simpleTokenTTLKeeper + simpleTokensMu sync.Mutex + simpleTokens map[string]string // token -> username +} + +func (t *tokenSimple) genTokenPrefix() (string, error) { + ret := make([]byte, defaultSimpleTokenLength) + + for i := 0; i < defaultSimpleTokenLength; i++ { + bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) + if err != nil { + return "", err + } + + ret[i] = letters[bInt.Int64()] + } + + return string(ret), nil +} + +func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { + t.simpleTokensMu.Lock() + _, ok := t.simpleTokens[token] + if ok { + plog.Panicf("token %s is alredy used", token) + } + + t.simpleTokens[token] = username + t.simpleTokenKeeper.addSimpleToken(token) + t.simpleTokensMu.Unlock() +} + +func (t *tokenSimple) invalidateUser(username string) { + if t.simpleTokenKeeper == nil { + return + } + t.simpleTokensMu.Lock() + for token, name := range t.simpleTokens { + if strings.Compare(name, username) == 0 { + delete(t.simpleTokens, token) + t.simpleTokenKeeper.deleteSimpleToken(token) + } + } + t.simpleTokensMu.Unlock() +} + +func (t *tokenSimple) enable() { + delf := func(tk string) { + if username, ok := t.simpleTokens[tk]; ok { + plog.Infof("deleting token %s for user %s", tk, username) + delete(t.simpleTokens, tk) + } + } + t.simpleTokenKeeper = &simpleTokenTTLKeeper{ + tokens: make(map[string]time.Time), + donec: make(chan struct{}), 
+ stopc: make(chan struct{}), + deleteTokenFunc: delf, + mu: &t.simpleTokensMu, + } + go t.simpleTokenKeeper.run() +} + +func (t *tokenSimple) disable() { + t.simpleTokensMu.Lock() + tk := t.simpleTokenKeeper + t.simpleTokenKeeper = nil + t.simpleTokens = make(map[string]string) // invalidate all tokens + t.simpleTokensMu.Unlock() + if tk != nil { + tk.stop() + } +} + +func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) { + if !t.isValidSimpleToken(ctx, token) { + return nil, false + } + t.simpleTokensMu.Lock() + username, ok := t.simpleTokens[token] + if ok && t.simpleTokenKeeper != nil { + t.simpleTokenKeeper.resetSimpleToken(token) + } + t.simpleTokensMu.Unlock() + return &AuthInfo{Username: username, Revision: revision}, ok +} + +func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { + // rev isn't used in simple token, it is only used in JWT + index := ctx.Value("index").(uint64) + simpleToken := ctx.Value("simpleToken").(string) + token := fmt.Sprintf("%s.%d", simpleToken, index) + t.assignSimpleTokenToUser(username, token) + + return token, nil +} + +func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { + splitted := strings.Split(token, ".") + if len(splitted) != 2 { + return false + } + index, err := strconv.Atoi(splitted[1]) + if err != nil { + return false + } + + select { + case <-t.indexWaiter(uint64(index)): + return true + case <-ctx.Done(): + } + + return false +} + +func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { + return &tokenSimple{ + simpleTokens: make(map[string]string), + indexWaiter: indexWaiter, + } +} diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go new file mode 100644 index 000000000..20b57f284 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -0,0 +1,1059 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "bytes" + "encoding/binary" + "errors" + "sort" + "strings" + "sync" + "sync/atomic" + + "github.com/coreos/etcd/auth/authpb" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/pkg/capnslog" + "golang.org/x/crypto/bcrypt" + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +var ( + enableFlagKey = []byte("authEnabled") + authEnabled = []byte{1} + authDisabled = []byte{0} + + revisionKey = []byte("authRevision") + + authBucketName = []byte("auth") + authUsersBucketName = []byte("authUsers") + authRolesBucketName = []byte("authRoles") + + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth") + + ErrRootUserNotExist = errors.New("auth: root user does not exist") + ErrRootRoleNotExist = errors.New("auth: root user does not have root role") + ErrUserAlreadyExist = errors.New("auth: user already exists") + ErrUserEmpty = errors.New("auth: user name is empty") + ErrUserNotFound = errors.New("auth: user not found") + ErrRoleAlreadyExist = errors.New("auth: role already exists") + ErrRoleNotFound = errors.New("auth: role not found") + ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") + ErrPermissionDenied = errors.New("auth: permission denied") + ErrRoleNotGranted = errors.New("auth: role is not granted to the user") + ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role") + ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") + ErrAuthOldRevision = errors.New("auth: revision in header is old") + ErrInvalidAuthToken = errors.New("auth: invalid auth token") + ErrInvalidAuthOpts = errors.New("auth: invalid auth options") + ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") + + // BcryptCost is the algorithm cost / strength for hashing auth passwords + BcryptCost = bcrypt.DefaultCost +) + +const ( + rootUser = "root" + rootRole = "root" + + revBytesLen = 8 +) + +type AuthInfo struct { + Username string + Revision uint64 +} + +type AuthStore interface { + // AuthEnable turns on the authentication feature + AuthEnable() error + + // AuthDisable turns off the authentication feature + AuthDisable() + + // Authenticate does authentication based on given user name and password + Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) + + // Recover recovers the state of auth store from the given backend + Recover(b backend.Backend) + + // UserAdd adds a new user + UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + + // UserDelete deletes a user + UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + + // UserChangePassword changes a password of a user + UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + + // UserGrantRole grants a role to the user + UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + + // UserGet gets the detailed information of a users + UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + + // UserRevokeRole revokes a role of a user + UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + + // RoleAdd adds a new role + RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + + // RoleGrantPermission grants a permission to a role + RoleGrantPermission(r 
*pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + + // RoleGet gets the detailed information of a role + RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + + // RoleRevokePermission gets the detailed information of a role + RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + + // RoleDelete gets the detailed information of a role + RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + + // UserList gets a list of all users + UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + + // RoleList gets a list of all roles + RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) + + // IsPutPermitted checks put permission of the user + IsPutPermitted(authInfo *AuthInfo, key []byte) error + + // IsRangePermitted checks range permission of the user + IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error + + // IsDeleteRangePermitted checks delete-range permission of the user + IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error + + // IsAdminPermitted checks admin permission of the user + IsAdminPermitted(authInfo *AuthInfo) error + + // GenTokenPrefix produces a random string in a case of simple token + // in a case of JWT, it produces an empty string + GenTokenPrefix() (string, error) + + // Revision gets current revision of authStore + Revision() uint64 + + // CheckPassword checks a given pair of username and password is correct + CheckPassword(username, password string) (uint64, error) + + // Close does cleanup of AuthStore + Close() error + + // AuthInfoFromCtx gets AuthInfo from gRPC's context + AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) + + // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context + AuthInfoFromTLS(ctx context.Context) *AuthInfo +} + +type TokenProvider interface { + info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) + assign(ctx context.Context, username string, revision uint64) (string, error) + enable() + disable() + + invalidateUser(string) + genTokenPrefix() (string, error) +} + +type authStore struct { + // atomic operations; need 64-bit align, or 32-bit tests will crash + revision uint64 + + be backend.Backend + enabled bool + enabledMu sync.RWMutex + + rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions + + tokenProvider TokenProvider +} + +func (as *authStore) AuthEnable() error { + as.enabledMu.Lock() + defer as.enabledMu.Unlock() + if as.enabled { + plog.Noticef("Authentication already enabled") + return nil + } + b := as.be + tx := b.BatchTx() + tx.Lock() + defer func() { + tx.Unlock() + b.ForceCommit() + }() + + u := getUser(tx, rootUser) + if u == nil { + return ErrRootUserNotExist + } + + if !hasRootRole(u) { + return ErrRootRoleNotExist + } + + tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) + + as.enabled = true + as.tokenProvider.enable() + + as.rangePermCache = make(map[string]*unifiedRangePermissions) + + as.setRevision(getRevision(tx)) + + plog.Noticef("Authentication enabled") + + return nil +} + +func (as *authStore) AuthDisable() { + as.enabledMu.Lock() + defer as.enabledMu.Unlock() + if !as.enabled { + return + } + b := as.be + tx := b.BatchTx() + tx.Lock() + tx.UnsafePut(authBucketName, enableFlagKey, authDisabled) + as.commitRevision(tx) + tx.Unlock() + b.ForceCommit() + + as.enabled = false + as.tokenProvider.disable() + + plog.Noticef("Authentication disabled") +} + 
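(The auth store hashes passwords with golang.org/x/crypto/bcrypt at BcryptCost and verifies them with CompareHashAndPassword, as seen further down in UserAdd and CheckPassword. A self-contained sketch of that round trip outside etcd, with a made-up password.)

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash at the default cost, as the auth store does via BcryptCost.
	hashed, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}

	// CompareHashAndPassword returns a non-nil error on mismatch.
	fmt.Println("correct password:", bcrypt.CompareHashAndPassword(hashed, []byte("s3cret")) == nil)
	fmt.Println("wrong password:  ", bcrypt.CompareHashAndPassword(hashed, []byte("wrong")) == nil)
}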
+func (as *authStore) Close() error { + as.enabledMu.Lock() + defer as.enabledMu.Unlock() + if !as.enabled { + return nil + } + as.tokenProvider.disable() + return nil +} + +func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { + if !as.isAuthEnabled() { + return nil, ErrAuthNotEnabled + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, username) + if user == nil { + return nil, ErrAuthFailed + } + + // Password checking is already performed in the API layer, so we don't need to check for now. + // Staleness of password can be detected with OCC in the API layer, too. + + token, err := as.tokenProvider.assign(ctx, username, as.Revision()) + if err != nil { + return nil, err + } + + plog.Debugf("authorized %s, token is %s", username, token) + return &pb.AuthenticateResponse{Token: token}, nil +} + +func (as *authStore) CheckPassword(username, password string) (uint64, error) { + if !as.isAuthEnabled() { + return 0, ErrAuthNotEnabled + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, username) + if user == nil { + return 0, ErrAuthFailed + } + + if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { + plog.Noticef("authentication failed, invalid password for user %s", username) + return 0, ErrAuthFailed + } + + return getRevision(tx), nil +} + +func (as *authStore) Recover(be backend.Backend) { + enabled := false + as.be = be + tx := be.BatchTx() + tx.Lock() + _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + enabled = true + } + } + + as.setRevision(getRevision(tx)) + + tx.Unlock() + + as.enabledMu.Lock() + as.enabled = enabled + as.enabledMu.Unlock() +} + +func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + if len(r.Name) == 0 { + return nil, ErrUserEmpty + } + + hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) + if err != nil { + plog.Errorf("failed to hash password: %s", err) + return nil, err + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user != nil { + return nil, ErrUserAlreadyExist + } + + newUser := &authpb.User{ + Name: []byte(r.Name), + Password: hashed, + } + + putUser(tx, newUser) + + as.commitRevision(tx) + + plog.Noticef("added a new user: %s", r.Name) + + return &pb.AuthUserAddResponse{}, nil +} + +func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 { + plog.Errorf("the user root must not be deleted") + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + delUser(tx, r.Name) + + as.commitRevision(tx) + + as.invalidateCachedPerm(r.Name) + as.tokenProvider.invalidateUser(r.Name) + + plog.Noticef("deleted a user: %s", r.Name) + + return &pb.AuthUserDeleteResponse{}, nil +} + +func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword() + // If the cost is too high, we should move the encryption to outside of the raft + hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) + if err != nil { + plog.Errorf("failed to hash password: %s", err) + return nil, err + } + 
+ tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + updatedUser := &authpb.User{ + Name: []byte(r.Name), + Roles: user.Roles, + Password: hashed, + } + + putUser(tx, updatedUser) + + as.commitRevision(tx) + + as.invalidateCachedPerm(r.Name) + as.tokenProvider.invalidateUser(r.Name) + + plog.Noticef("changed a password of a user: %s", r.Name) + + return &pb.AuthUserChangePasswordResponse{}, nil +} + +func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.User) + if user == nil { + return nil, ErrUserNotFound + } + + if r.Role != rootRole { + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + } + + idx := sort.SearchStrings(user.Roles, r.Role) + if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 { + plog.Warningf("user %s is already granted role %s", r.User, r.Role) + return &pb.AuthUserGrantRoleResponse{}, nil + } + + user.Roles = append(user.Roles, r.Role) + sort.Sort(sort.StringSlice(user.Roles)) + + putUser(tx, user) + + as.invalidateCachedPerm(r.User) + + as.commitRevision(tx) + + plog.Noticef("granted role %s to user %s", r.Role, r.User) + return &pb.AuthUserGrantRoleResponse{}, nil +} + +func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthUserGetResponse + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + resp.Roles = append(resp.Roles, user.Roles...) + return &resp, nil +} + +func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthUserListResponse + + users := getAllUsers(tx) + + for _, u := range users { + resp.Users = append(resp.Users, string(u.Name)) + } + + return &resp, nil +} + +func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be revoked from the user root") + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + } + + for _, role := range user.Roles { + if strings.Compare(role, r.Role) != 0 { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + return nil, ErrRoleNotGranted + } + + putUser(tx, updatedUser) + + as.invalidateCachedPerm(r.Name) + + as.commitRevision(tx) + + plog.Noticef("revoked role %s from user %s", r.Role, r.Name) + return &pb.AuthUserRevokeRoleResponse{}, nil +} + +func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthRoleGetResponse + + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + resp.Perm = append(resp.Perm, role.KeyPermission...) 
+ return &resp, nil +} + +func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthRoleListResponse + + roles := getAllRoles(tx) + + for _, r := range roles { + resp.Roles = append(resp.Roles, string(r.Name)) + } + + return &resp, nil +} + +func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + + updatedRole := &authpb.Role{ + Name: role.Name, + } + + for _, perm := range role.KeyPermission { + if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) { + updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) + } + } + + if len(role.KeyPermission) == len(updatedRole.KeyPermission) { + return nil, ErrPermissionNotGranted + } + + putRole(tx, updatedRole) + + // TODO(mitake): currently single role update invalidates every cache + // It should be optimized. + as.clearCachedPerm() + + as.commitRevision(tx) + + plog.Noticef("revoked key %s from role %s", r.Key, r.Role) + return &pb.AuthRoleRevokePermissionResponse{}, nil +} + +func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + if as.enabled && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be deleted") + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + + delRole(tx, r.Role) + + users := getAllUsers(tx) + for _, user := range users { + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + } + + for _, role := range user.Roles { + if strings.Compare(role, r.Role) != 0 { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + continue + } + + putUser(tx, updatedUser) + + as.invalidateCachedPerm(string(user.Name)) + } + + as.commitRevision(tx) + + plog.Noticef("deleted role %s", r.Role) + return &pb.AuthRoleDeleteResponse{}, nil +} + +func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Name) + if role != nil { + return nil, ErrRoleAlreadyExist + } + + newRole := &authpb.Role{ + Name: []byte(r.Name), + } + + putRole(tx, newRole) + + as.commitRevision(tx) + + plog.Noticef("Role %s is created", r.Name) + + return &pb.AuthRoleAddResponse{}, nil +} + +func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { + return as.tokenProvider.info(ctx, token, as.Revision()) +} + +type permSlice []*authpb.Permission + +func (perms permSlice) Len() int { + return len(perms) +} + +func (perms permSlice) Less(i, j int) bool { + return bytes.Compare(perms[i].Key, perms[j].Key) < 0 +} + +func (perms permSlice) Swap(i, j int) { + perms[i], perms[j] = perms[j], perms[i] +} + +func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(tx, r.Name) + if role == nil { + return nil, ErrRoleNotFound + } + + idx := sort.Search(len(role.KeyPermission), func(i int) bool { + return bytes.Compare(role.KeyPermission[i].Key, 
[]byte(r.Perm.Key)) >= 0 + }) + + if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { + // update existing permission + role.KeyPermission[idx].PermType = r.Perm.PermType + } else { + // append new permission to the role + newPerm := &authpb.Permission{ + Key: []byte(r.Perm.Key), + RangeEnd: []byte(r.Perm.RangeEnd), + PermType: r.Perm.PermType, + } + + role.KeyPermission = append(role.KeyPermission, newPerm) + sort.Sort(permSlice(role.KeyPermission)) + } + + putRole(tx, role) + + // TODO(mitake): currently single role update invalidates every cache + // It should be optimized. + as.clearCachedPerm() + + as.commitRevision(tx) + + plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) + + return &pb.AuthRoleGrantPermissionResponse{}, nil +} + +func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { + // TODO(mitake): this function would be costly so we need a caching mechanism + if !as.isAuthEnabled() { + return nil + } + + // only gets rev == 0 when passed AuthInfo{}; no user given + if revision == 0 { + return ErrUserEmpty + } + + if revision < as.Revision() { + return ErrAuthOldRevision + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(tx, userName) + if user == nil { + plog.Errorf("invalid user name %s for permission checking", userName) + return ErrPermissionDenied + } + + // root role should have permission on all ranges + if hasRootRole(user) { + return nil + } + + if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) { + return nil + } + + return ErrPermissionDenied +} + +func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE) +} + +func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) +} + +func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE) +} + +func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { + if !as.isAuthEnabled() { + return nil + } + if authInfo == nil { + return ErrUserEmpty + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + u := getUser(tx, authInfo.Username) + if u == nil { + return ErrUserNotFound + } + + if !hasRootRole(u) { + return ErrPermissionDenied + } + + return nil +} + +func getUser(tx backend.BatchTx, username string) *authpb.User { + _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0) + if len(vs) == 0 { + return nil + } + + user := &authpb.User{} + err := user.Unmarshal(vs[0]) + if err != nil { + plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) + } + return user +} + +func getAllUsers(tx backend.BatchTx) []*authpb.User { + _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + var users []*authpb.User + + for _, v := range vs { + user := &authpb.User{} + err := user.Unmarshal(v) + if err != nil { + plog.Panicf("failed to unmarshal user struct: %s", err) + } + + users = append(users, user) + } + + return users +} + +func putUser(tx backend.BatchTx, user 
*authpb.User) { + b, err := user.Marshal() + if err != nil { + plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) + } + tx.UnsafePut(authUsersBucketName, user.Name, b) +} + +func delUser(tx backend.BatchTx, username string) { + tx.UnsafeDelete(authUsersBucketName, []byte(username)) +} + +func getRole(tx backend.BatchTx, rolename string) *authpb.Role { + _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0) + if len(vs) == 0 { + return nil + } + + role := &authpb.Role{} + err := role.Unmarshal(vs[0]) + if err != nil { + plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err) + } + return role +} + +func getAllRoles(tx backend.BatchTx) []*authpb.Role { + _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + var roles []*authpb.Role + + for _, v := range vs { + role := &authpb.Role{} + err := role.Unmarshal(v) + if err != nil { + plog.Panicf("failed to unmarshal role struct: %s", err) + } + + roles = append(roles, role) + } + + return roles +} + +func putRole(tx backend.BatchTx, role *authpb.Role) { + b, err := role.Marshal() + if err != nil { + plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) + } + + tx.UnsafePut(authRolesBucketName, []byte(role.Name), b) +} + +func delRole(tx backend.BatchTx, rolename string) { + tx.UnsafeDelete(authRolesBucketName, []byte(rolename)) +} + +func (as *authStore) isAuthEnabled() bool { + as.enabledMu.RLock() + defer as.enabledMu.RUnlock() + return as.enabled +} + +func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { + tx := be.BatchTx() + tx.Lock() + + tx.UnsafeCreateBucket(authBucketName) + tx.UnsafeCreateBucket(authUsersBucketName) + tx.UnsafeCreateBucket(authRolesBucketName) + + enabled := false + _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + enabled = true + } + } + + as := &authStore{ + be: be, + revision: getRevision(tx), + enabled: enabled, + rangePermCache: make(map[string]*unifiedRangePermissions), + tokenProvider: tp, + } + + if enabled { + as.tokenProvider.enable() + } + + if as.Revision() == 0 { + as.commitRevision(tx) + } + + tx.Unlock() + be.ForceCommit() + + return as +} + +func hasRootRole(u *authpb.User) bool { + for _, r := range u.Roles { + if r == rootRole { + return true + } + } + return false +} + +func (as *authStore) commitRevision(tx backend.BatchTx) { + atomic.AddUint64(&as.revision, 1) + revBytes := make([]byte, revBytesLen) + binary.BigEndian.PutUint64(revBytes, as.Revision()) + tx.UnsafePut(authBucketName, revisionKey, revBytes) +} + +func getRevision(tx backend.BatchTx) uint64 { + _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0) + if len(vs) != 1 { + // this can happen in the initialization phase + return 0 + } + + return binary.BigEndian.Uint64(vs[0]) +} + +func (as *authStore) setRevision(rev uint64) { + atomic.StoreUint64(&as.revision, rev) +} + +func (as *authStore) Revision() uint64 { + return atomic.LoadUint64(&as.revision) +} + +func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { + peer, ok := peer.FromContext(ctx) + if !ok || peer == nil || peer.AuthInfo == nil { + return nil + } + + tlsInfo := peer.AuthInfo.(credentials.TLSInfo) + for _, chains := range tlsInfo.State.VerifiedChains { + for _, chain := range chains { + cn := chain.Subject.CommonName + plog.Debugf("found common name %s", cn) + + return &AuthInfo{ + Username: cn, + Revision: 
as.Revision(), + } + } + } + + return nil +} + +func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { + md, ok := metadata.FromContext(ctx) + if !ok { + return nil, nil + } + + ts, tok := md["token"] + if !tok { + return nil, nil + } + + token := ts[0] + authInfo, uok := as.authInfoFromToken(ctx, token) + if !uok { + plog.Warningf("invalid auth token: %s", token) + return nil, ErrInvalidAuthToken + } + return authInfo, nil +} + +func (as *authStore) GenTokenPrefix() (string, error) { + return as.tokenProvider.genTokenPrefix() +} + +func decomposeOpts(optstr string) (string, map[string]string, error) { + opts := strings.Split(optstr, ",") + tokenType := opts[0] + + typeSpecificOpts := make(map[string]string) + for i := 1; i < len(opts); i++ { + pair := strings.Split(opts[i], "=") + + if len(pair) != 2 { + plog.Errorf("invalid token specific option: %s", optstr) + return "", nil, ErrInvalidAuthOpts + } + + if _, ok := typeSpecificOpts[pair[0]]; ok { + plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + return "", nil, ErrInvalidAuthOpts + } + + typeSpecificOpts[pair[0]] = pair[1] + } + + return tokenType, typeSpecificOpts, nil + +} + +func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { + tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + switch tokenType { + case "simple": + plog.Warningf("simple token is not cryptographically signed") + return newTokenProviderSimple(indexWaiter), nil + case "jwt": + return newTokenProviderJWT(typeSpecificOpts) + default: + plog.Errorf("unknown token type: %s", tokenType) + return nil, ErrInvalidAuthOpts + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go new file mode 100644 index 000000000..5e2de58e9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go @@ -0,0 +1,86 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "sync" + + "github.com/coreos/etcd/version" + "github.com/coreos/go-semver/semver" + "github.com/coreos/pkg/capnslog" +) + +type Capability string + +const ( + AuthCapability Capability = "auth" + V3rpcCapability Capability = "v3rpc" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") + + // capabilityMaps is a static map of version to capability map. 
+ capabilityMaps = map[string]map[Capability]bool{ + "3.0.0": {AuthCapability: true, V3rpcCapability: true}, + "3.1.0": {AuthCapability: true, V3rpcCapability: true}, + "3.2.0": {AuthCapability: true, V3rpcCapability: true}, + } + + enableMapMu sync.RWMutex + // enabledMap points to a map in capabilityMaps + enabledMap map[Capability]bool + + curVersion *semver.Version +) + +func init() { + enabledMap = map[Capability]bool{ + AuthCapability: true, + V3rpcCapability: true, + } +} + +// UpdateCapability updates the enabledMap when the cluster version increases. +func UpdateCapability(v *semver.Version) { + if v == nil { + // if recovered but version was never set by cluster + return + } + enableMapMu.Lock() + if curVersion != nil && !curVersion.LessThan(*v) { + enableMapMu.Unlock() + return + } + curVersion = v + enabledMap = capabilityMaps[curVersion.String()] + enableMapMu.Unlock() + plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) +} + +func IsCapabilityEnabled(c Capability) bool { + enableMapMu.RLock() + defer enableMapMu.RUnlock() + if enabledMap == nil { + return false + } + return enabledMap[c] +} + +func EnableCapability(c Capability) { + enableMapMu.Lock() + defer enableMapMu.Unlock() + enabledMap[c] = true +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go new file mode 100644 index 000000000..87face4a1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go @@ -0,0 +1,41 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/types" + + "github.com/coreos/go-semver/semver" +) + +// Cluster is an interface representing a collection of members in one etcd cluster. +type Cluster interface { + // ID returns the cluster ID + ID() types.ID + // ClientURLs returns an aggregate set of all URLs on which this + // cluster is listening for client requests + ClientURLs() []string + // Members returns a slice of members sorted by their ID + Members() []*membership.Member + // Member retrieves a particular member based on ID, or nil if the + // member does not exist in the cluster + Member(id types.ID) *membership.Member + // IsIDRemoved checks whether the given ID has been removed from this + // cluster at some point in the past + IsIDRemoved(id types.ID) bool + // Version is the cluster-wide minimum major.minor version. + Version() *semver.Version +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go new file mode 100644 index 000000000..f44881be6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package api manages the capabilities and features that are exposed to clients by the etcd cluster. +package api diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go new file mode 100644 index 000000000..e66c5261d --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go @@ -0,0 +1,157 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "github.com/coreos/etcd/etcdserver" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" +) + +type AuthServer struct { + authenticator etcdserver.Authenticator +} + +func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer { + return &AuthServer{authenticator: s} +} + +func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { + resp, err := as.authenticator.AuthEnable(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { + resp, err := as.authenticator.AuthDisable(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { + resp, err := as.authenticator.Authenticate(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := as.authenticator.RoleAdd(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + resp, err := as.authenticator.RoleDelete(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := as.authenticator.RoleGet(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + resp, err := as.authenticator.RoleList(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleRevokePermission(ctx context.Context, r 
*pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err := as.authenticator.RoleRevokePermission(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := as.authenticator.RoleGrantPermission(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := as.authenticator.UserAdd(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := as.authenticator.UserDelete(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := as.authenticator.UserGet(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + resp, err := as.authenticator.UserList(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := as.authenticator.UserGrantRole(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := as.authenticator.UserRevokeRole(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + resp, err := as.authenticator.UserChangePassword(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go new file mode 100644 index 000000000..17a2c87ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go @@ -0,0 +1,34 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import "github.com/gogo/protobuf/proto" + +type codec struct{} + +func (c *codec) Marshal(v interface{}) ([]byte, error) { + b, err := proto.Marshal(v.(proto.Message)) + sentBytes.Add(float64(len(b))) + return b, err +} + +func (c *codec) Unmarshal(data []byte, v interface{}) error { + receivedBytes.Add(float64(len(data))) + return proto.Unmarshal(data, v.(proto.Message)) +} + +func (c *codec) String() string { + return "proto" +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go new file mode 100644 index 000000000..68a7c120e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go @@ -0,0 +1,53 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "crypto/tls" + "math" + + "github.com/coreos/etcd/etcdserver" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +const maxStreams = math.MaxUint32 + +func init() { + grpclog.SetLogger(plog) +} + +func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { + var opts []grpc.ServerOption + opts = append(opts, grpc.CustomCodec(&codec{})) + if tls != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(tls))) + } + opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) + opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) + opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) + grpcServer := grpc.NewServer(opts...) + + pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) + pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) + pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) + pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) + pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) + pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) + + return grpcServer +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go new file mode 100644 index 000000000..d6d7f35d5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go @@ -0,0 +1,46 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import ( + "github.com/coreos/etcd/etcdserver" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +type header struct { + clusterID int64 + memberID int64 + raftTimer etcdserver.RaftTimer + rev func() int64 +} + +func newHeader(s *etcdserver.EtcdServer) header { + return header{ + clusterID: int64(s.Cluster().ID()), + memberID: int64(s.ID()), + raftTimer: s, + rev: func() int64 { return s.KV().Rev() }, + } +} + +// fill populates pb.ResponseHeader using etcdserver information +func (h *header) fill(rh *pb.ResponseHeader) { + rh.ClusterId = uint64(h.clusterID) + rh.MemberId = uint64(h.memberID) + rh.RaftTerm = h.raftTimer.Term() + if rh.Revision == 0 { + rh.Revision = h.rev() + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go new file mode 100644 index 000000000..29aef2914 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go @@ -0,0 +1,144 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "sync" + "time" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + + prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const ( + maxNoLeaderCnt = 3 +) + +type streamsMap struct { + mu sync.Mutex + streams map[grpc.ServerStream]struct{} +} + +func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if !api.IsCapabilityEnabled(api.V3rpcCapability) { + return nil, rpctypes.ErrGRPCNotCapable + } + + md, ok := metadata.FromContext(ctx) + if ok { + if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { + if s.Leader() == types.ID(raft.None) { + return nil, rpctypes.ErrGRPCNoLeader + } + } + } + + return prometheus.UnaryServerInterceptor(ctx, req, info, handler) + } +} + +func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor { + smap := monitorLeader(s) + + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if !api.IsCapabilityEnabled(api.V3rpcCapability) { + return rpctypes.ErrGRPCNotCapable + } + + md, ok := metadata.FromContext(ss.Context()) + if ok { + if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { + if s.Leader() == types.ID(raft.None) { + return rpctypes.ErrGRPCNoLeader + } + + cctx, cancel := context.WithCancel(ss.Context()) + ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss} + + smap.mu.Lock() + smap.streams[ss] = struct{}{} + 
smap.mu.Unlock() + + defer func() { + smap.mu.Lock() + delete(smap.streams, ss) + smap.mu.Unlock() + cancel() + }() + + } + } + + return prometheus.StreamServerInterceptor(srv, ss, info, handler) + } +} + +type serverStreamWithCtx struct { + grpc.ServerStream + ctx context.Context + cancel *context.CancelFunc +} + +func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx } + +func monitorLeader(s *etcdserver.EtcdServer) *streamsMap { + smap := &streamsMap{ + streams: make(map[grpc.ServerStream]struct{}), + } + + go func() { + election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond + noLeaderCnt := 0 + + for { + select { + case <-s.StopNotify(): + return + case <-time.After(election): + if s.Leader() == types.ID(raft.None) { + noLeaderCnt++ + } else { + noLeaderCnt = 0 + } + + // We are more conservative on canceling existing streams. Reconnecting streams + // cost much more than just rejecting new requests. So we wait until the member + // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams. + if noLeaderCnt >= maxNoLeaderCnt { + smap.mu.Lock() + for ss := range smap.streams { + if ssWithCtx, ok := ss.(serverStreamWithCtx); ok { + (*ssWithCtx.cancel)() + <-ss.Context().Done() + } + } + smap.streams = make(map[grpc.ServerStream]struct{}) + smap.mu.Unlock() + } + } + } + }() + + return smap +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go new file mode 100644 index 000000000..d0220e03a --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go @@ -0,0 +1,259 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3rpc implements etcd v3 RPC system based on gRPC. +package v3rpc + +import ( + "sort" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc") + + // Max operations per txn list. For example, Txn.Success can have at most 128 operations, + // and Txn.Failure can have at most 128 operations. 
+ MaxOpsPerTxn = 128 +) + +type kvServer struct { + hdr header + kv etcdserver.RaftKV +} + +func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer { + return &kvServer{hdr: newHeader(s), kv: s} +} + +func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if err := checkRangeRequest(r); err != nil { + return nil, err + } + + resp, err := s.kv.Range(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + if resp.Header == nil { + plog.Panic("unexpected nil resp.Header") + } + s.hdr.fill(resp.Header) + return resp, nil +} + +func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + if err := checkPutRequest(r); err != nil { + return nil, err + } + + resp, err := s.kv.Put(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + if resp.Header == nil { + plog.Panic("unexpected nil resp.Header") + } + s.hdr.fill(resp.Header) + return resp, nil +} + +func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + if err := checkDeleteRequest(r); err != nil { + return nil, err + } + + resp, err := s.kv.DeleteRange(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + if resp.Header == nil { + plog.Panic("unexpected nil resp.Header") + } + s.hdr.fill(resp.Header) + return resp, nil +} + +func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if err := checkTxnRequest(r); err != nil { + return nil, err + } + + resp, err := s.kv.Txn(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + if resp.Header == nil { + plog.Panic("unexpected nil resp.Header") + } + s.hdr.fill(resp.Header) + return resp, nil +} + +func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { + resp, err := s.kv.Compact(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + if resp.Header == nil { + plog.Panic("unexpected nil resp.Header") + } + s.hdr.fill(resp.Header) + return resp, nil +} + +func checkRangeRequest(r *pb.RangeRequest) error { + if len(r.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + return nil +} + +func checkPutRequest(r *pb.PutRequest) error { + if len(r.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + if r.IgnoreValue && len(r.Value) != 0 { + return rpctypes.ErrGRPCValueProvided + } + if r.IgnoreLease && r.Lease != 0 { + return rpctypes.ErrGRPCLeaseProvided + } + return nil +} + +func checkDeleteRequest(r *pb.DeleteRangeRequest) error { + if len(r.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + return nil +} + +func checkTxnRequest(r *pb.TxnRequest) error { + if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn { + return rpctypes.ErrGRPCTooManyOps + } + + for _, c := range r.Compare { + if len(c.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + } + + for _, u := range r.Success { + if err := checkRequestOp(u); err != nil { + return err + } + } + if err := checkRequestDupKeys(r.Success); err != nil { + return err + } + + for _, u := range r.Failure { + if err := checkRequestOp(u); err != nil { + return err + } + } + return checkRequestDupKeys(r.Failure) +} + +// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice +func checkRequestDupKeys(reqs []*pb.RequestOp) error { + // check put overlap + keys := make(map[string]struct{}) + for _, requ := range reqs { + tv, ok := requ.Request.(*pb.RequestOp_RequestPut) + if !ok { + continue + } + preq := tv.RequestPut + if 
preq == nil { + continue + } + if _, ok := keys[string(preq.Key)]; ok { + return rpctypes.ErrGRPCDuplicateKey + } + keys[string(preq.Key)] = struct{}{} + } + + // no need to check deletes if no puts; delete overlaps are permitted + if len(keys) == 0 { + return nil + } + + // sort keys for range checking + sortedKeys := []string{} + for k := range keys { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + // check put overlap with deletes + for _, requ := range reqs { + tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange) + if !ok { + continue + } + dreq := tv.RequestDeleteRange + if dreq == nil { + continue + } + if dreq.RangeEnd == nil { + if _, found := keys[string(dreq.Key)]; found { + return rpctypes.ErrGRPCDuplicateKey + } + } else { + lo := sort.SearchStrings(sortedKeys, string(dreq.Key)) + hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd)) + if lo != hi { + // element between lo and hi => overlap + return rpctypes.ErrGRPCDuplicateKey + } + } + } + + return nil +} + +func checkRequestOp(u *pb.RequestOp) error { + // TODO: ensure only one of the field is set. + switch uv := u.Request.(type) { + case *pb.RequestOp_RequestRange: + if uv.RequestRange != nil { + return checkRangeRequest(uv.RequestRange) + } + case *pb.RequestOp_RequestPut: + if uv.RequestPut != nil { + return checkPutRequest(uv.RequestPut) + } + case *pb.RequestOp_RequestDeleteRange: + if uv.RequestDeleteRange != nil { + return checkDeleteRequest(uv.RequestDeleteRange) + } + default: + // empty op / nil entry + return rpctypes.ErrGRPCKeyNotFound + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go new file mode 100644 index 000000000..a25d0ce6a --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go @@ -0,0 +1,123 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import ( + "io" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/lease" + "golang.org/x/net/context" +) + +type LeaseServer struct { + hdr header + le etcdserver.Lessor +} + +func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { + return &LeaseServer{le: s, hdr: newHeader(s)} +} + +func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + resp, err := ls.le.LeaseGrant(ctx, cr) + + if err != nil { + return nil, togRPCError(err) + } + ls.hdr.fill(resp.Header) + return resp, nil +} + +func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + resp, err := ls.le.LeaseRevoke(ctx, rr) + if err != nil { + return nil, togRPCError(err) + } + ls.hdr.fill(resp.Header) + return resp, nil +} + +func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { + resp, err := ls.le.LeaseTimeToLive(ctx, rr) + if err != nil && err != lease.ErrLeaseNotFound { + return nil, togRPCError(err) + } + if err == lease.ErrLeaseNotFound { + resp = &pb.LeaseTimeToLiveResponse{ + Header: &pb.ResponseHeader{}, + ID: rr.ID, + TTL: -1, + } + } + ls.hdr.fill(resp.Header) + return resp, nil +} + +func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) { + errc := make(chan error, 1) + go func() { + errc <- ls.leaseKeepAlive(stream) + }() + select { + case err = <-errc: + case <-stream.Context().Done(): + // the only server-side cancellation is noleader for now. + err = stream.Context().Err() + if err == context.Canceled { + err = rpctypes.ErrGRPCNoLeader + } + } + return err +} + +func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { + for { + req, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + // Create header before we sent out the renew request. + // This can make sure that the revision is strictly smaller or equal to + // when the keepalive happened at the local server (when the local server is the leader) + // or remote leader. + // Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded + // at rev 4. + resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}} + ls.hdr.fill(resp.Header) + + ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID)) + if err == lease.ErrLeaseNotFound { + err = nil + ttl = 0 + } + + if err != nil { + return togRPCError(err) + } + + resp.TTL = ttl + err = stream.Send(resp) + if err != nil { + return err + } + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go new file mode 100644 index 000000000..3657d0360 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go @@ -0,0 +1,190 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "crypto/sha256" + "io" + + "github.com/coreos/etcd/auth" + "github.com/coreos/etcd/etcdserver" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/version" + "golang.org/x/net/context" +) + +type KVGetter interface { + KV() mvcc.ConsistentWatchableKV +} + +type BackendGetter interface { + Backend() backend.Backend +} + +type Alarmer interface { + Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) +} + +type RaftStatusGetter interface { + Index() uint64 + Term() uint64 + Leader() types.ID +} + +type AuthGetter interface { + AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) + AuthStore() auth.AuthStore +} + +type maintenanceServer struct { + rg RaftStatusGetter + kg KVGetter + bg BackendGetter + a Alarmer + hdr header +} + +func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { + srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)} + return &authMaintenanceServer{srv, s} +} + +func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { + plog.Noticef("starting to defragment the storage backend...") + err := ms.bg.Backend().Defrag() + if err != nil { + plog.Errorf("failed to defragment the storage backend (%v)", err) + return nil, err + } + plog.Noticef("finished defragmenting the storage backend") + return &pb.DefragmentResponse{}, nil +} + +func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { + snap := ms.bg.Backend().Snapshot() + pr, pw := io.Pipe() + + defer pr.Close() + + go func() { + snap.WriteTo(pw) + if err := snap.Close(); err != nil { + plog.Errorf("error closing snapshot (%v)", err) + } + pw.Close() + }() + + // send file data + h := sha256.New() + br := int64(0) + buf := make([]byte, 32*1024) + sz := snap.Size() + for br < sz { + n, err := io.ReadFull(pr, buf) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return togRPCError(err) + } + br += int64(n) + resp := &pb.SnapshotResponse{ + RemainingBytes: uint64(sz - br), + Blob: buf[:n], + } + if err = srv.Send(resp); err != nil { + return togRPCError(err) + } + h.Write(buf[:n]) + } + + // send sha + sha := h.Sum(nil) + hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} + if err := srv.Send(hresp); err != nil { + return togRPCError(err) + } + + return nil +} + +func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { + h, rev, err := ms.kg.KV().Hash() + if err != nil { + return nil, togRPCError(err) + } + resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h} + ms.hdr.fill(resp.Header) + return resp, nil +} + +func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + return ms.a.Alarm(ctx, ar) +} + +func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { + resp := 
&pb.StatusResponse{ + Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, + Version: version.Version, + DbSize: ms.bg.Backend().Size(), + Leader: uint64(ms.rg.Leader()), + RaftIndex: ms.rg.Index(), + RaftTerm: ms.rg.Term(), + } + ms.hdr.fill(resp.Header) + return resp, nil +} + +type authMaintenanceServer struct { + *maintenanceServer + ag AuthGetter +} + +func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { + authInfo, err := ams.ag.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + + return ams.ag.AuthStore().IsAdminPermitted(authInfo) +} + +func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { + if err := ams.isAuthenticated(ctx); err != nil { + return nil, err + } + + return ams.maintenanceServer.Defragment(ctx, sr) +} + +func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { + if err := ams.isAuthenticated(srv.Context()); err != nil { + return err + } + + return ams.maintenanceServer.Snapshot(sr, srv) +} + +func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { + if err := ams.isAuthenticated(ctx); err != nil { + return nil, err + } + + return ams.maintenanceServer.Hash(ctx, r) +} + +func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { + return ams.maintenanceServer.Status(ctx, ar) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go new file mode 100644 index 000000000..91a59389b --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go @@ -0,0 +1,103 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import ( + "time" + + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/types" + "golang.org/x/net/context" +) + +type ClusterServer struct { + cluster api.Cluster + server etcdserver.Server + raftTimer etcdserver.RaftTimer +} + +func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer { + return &ClusterServer{ + cluster: s.Cluster(), + server: s, + raftTimer: s, + } +} + +func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { + urls, err := types.NewURLs(r.PeerURLs) + if err != nil { + return nil, rpctypes.ErrGRPCMemberBadURLs + } + + now := time.Now() + m := membership.NewMember("", urls, "", &now) + membs, merr := cs.server.AddMember(ctx, *m) + if merr != nil { + return nil, togRPCError(merr) + } + + return &pb.MemberAddResponse{ + Header: cs.header(), + Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Members: membersToProtoMembers(membs), + }, nil +} + +func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { + membs, err := cs.server.RemoveMember(ctx, r.ID) + if err != nil { + return nil, togRPCError(err) + } + return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil +} + +func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { + m := membership.Member{ + ID: types.ID(r.ID), + RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, + } + membs, err := cs.server.UpdateMember(ctx, m) + if err != nil { + return nil, togRPCError(err) + } + return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil +} + +func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { + membs := membersToProtoMembers(cs.cluster.Members()) + return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil +} + +func (cs *ClusterServer) header() *pb.ResponseHeader { + return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} +} + +func membersToProtoMembers(membs []*membership.Member) []*pb.Member { + protoMembs := make([]*pb.Member, len(membs)) + for i := range membs { + protoMembs[i] = &pb.Member{ + Name: membs[i].Name, + ID: uint64(membs[i].ID), + PeerURLs: membs[i].PeerURLs, + ClientURLs: membs[i].ClientURLs, + } + } + return protoMembs +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go new file mode 100644 index 000000000..6cb41a61e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go @@ -0,0 +1,38 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import "github.com/prometheus/client_golang/prometheus" + +var ( + sentBytes = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "client_grpc_sent_bytes_total", + Help: "The total number of bytes sent to grpc clients.", + }) + + receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "client_grpc_received_bytes_total", + Help: "The total number of bytes received from grpc clients.", + }) +) + +func init() { + prometheus.MustRegister(sentBytes) + prometheus.MustRegister(receivedBytes) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go new file mode 100644 index 000000000..836f2fd3f --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go @@ -0,0 +1,89 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/types" + "golang.org/x/net/context" +) + +type quotaKVServer struct { + pb.KVServer + qa quotaAlarmer +} + +type quotaAlarmer struct { + q etcdserver.Quota + a Alarmer + id types.ID +} + +// check whether request satisfies the quota. If there is not enough space, +// ignore request and raise the free space alarm. 
+func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { + if qa.q.Available(r) { + return nil + } + req := &pb.AlarmRequest{ + MemberID: uint64(qa.id), + Action: pb.AlarmRequest_ACTIVATE, + Alarm: pb.AlarmType_NOSPACE, + } + qa.a.Alarm(ctx, req) + return rpctypes.ErrGRPCNoSpace +} + +func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { + return "aKVServer{ + NewKVServer(s), + quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, + } +} + +func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + if err := s.qa.check(ctx, r); err != nil { + return nil, err + } + return s.KVServer.Put(ctx, r) +} + +func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if err := s.qa.check(ctx, r); err != nil { + return nil, err + } + return s.KVServer.Txn(ctx, r) +} + +type quotaLeaseServer struct { + pb.LeaseServer + qa quotaAlarmer +} + +func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + if err := s.qa.check(ctx, cr); err != nil { + return nil, err + } + return s.LeaseServer.LeaseGrant(ctx, cr) +} + +func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { + return "aLeaseServer{ + NewLeaseServer(s), + quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go new file mode 100644 index 000000000..8d38d9bd1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -0,0 +1,103 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import ( + "github.com/coreos/etcd/auth" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func togRPCError(err error) error { + switch err { + case membership.ErrIDRemoved: + return rpctypes.ErrGRPCMemberNotFound + case membership.ErrIDNotFound: + return rpctypes.ErrGRPCMemberNotFound + case membership.ErrIDExists: + return rpctypes.ErrGRPCMemberExist + case membership.ErrPeerURLexists: + return rpctypes.ErrGRPCPeerURLExist + case etcdserver.ErrNotEnoughStartedMembers: + return rpctypes.ErrMemberNotEnoughStarted + + case mvcc.ErrCompacted: + return rpctypes.ErrGRPCCompacted + case mvcc.ErrFutureRev: + return rpctypes.ErrGRPCFutureRev + case etcdserver.ErrRequestTooLarge: + return rpctypes.ErrGRPCRequestTooLarge + case etcdserver.ErrNoSpace: + return rpctypes.ErrGRPCNoSpace + case etcdserver.ErrTooManyRequests: + return rpctypes.ErrTooManyRequests + + case etcdserver.ErrNoLeader: + return rpctypes.ErrGRPCNoLeader + case etcdserver.ErrStopped: + return rpctypes.ErrGRPCStopped + case etcdserver.ErrTimeout: + return rpctypes.ErrGRPCTimeout + case etcdserver.ErrTimeoutDueToLeaderFail: + return rpctypes.ErrGRPCTimeoutDueToLeaderFail + case etcdserver.ErrTimeoutDueToConnectionLost: + return rpctypes.ErrGRPCTimeoutDueToConnectionLost + case etcdserver.ErrUnhealthy: + return rpctypes.ErrGRPCUnhealthy + case etcdserver.ErrKeyNotFound: + return rpctypes.ErrGRPCKeyNotFound + + case lease.ErrLeaseNotFound: + return rpctypes.ErrGRPCLeaseNotFound + case lease.ErrLeaseExists: + return rpctypes.ErrGRPCLeaseExist + + case auth.ErrRootUserNotExist: + return rpctypes.ErrGRPCRootUserNotExist + case auth.ErrRootRoleNotExist: + return rpctypes.ErrGRPCRootRoleNotExist + case auth.ErrUserAlreadyExist: + return rpctypes.ErrGRPCUserAlreadyExist + case auth.ErrUserEmpty: + return rpctypes.ErrGRPCUserEmpty + case auth.ErrUserNotFound: + return rpctypes.ErrGRPCUserNotFound + case auth.ErrRoleAlreadyExist: + return rpctypes.ErrGRPCRoleAlreadyExist + case auth.ErrRoleNotFound: + return rpctypes.ErrGRPCRoleNotFound + case auth.ErrAuthFailed: + return rpctypes.ErrGRPCAuthFailed + case auth.ErrPermissionDenied: + return rpctypes.ErrGRPCPermissionDenied + case auth.ErrRoleNotGranted: + return rpctypes.ErrGRPCRoleNotGranted + case auth.ErrPermissionNotGranted: + return rpctypes.ErrGRPCPermissionNotGranted + case auth.ErrAuthNotEnabled: + return rpctypes.ErrGRPCAuthNotEnabled + case auth.ErrInvalidAuthToken: + return rpctypes.ErrGRPCInvalidAuthToken + case auth.ErrInvalidAuthMgmt: + return rpctypes.ErrGRPCInvalidAuthMgmt + default: + return grpc.Errorf(codes.Unknown, err.Error()) + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go new file mode 100644 index 000000000..84c0a5eac --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -0,0 +1,426 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "io" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/coreos/etcd/auth" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type watchServer struct { + clusterID int64 + memberID int64 + raftTimer etcdserver.RaftTimer + watchable mvcc.WatchableKV + + ag AuthGetter +} + +func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { + return &watchServer{ + clusterID: int64(s.Cluster().ID()), + memberID: int64(s.ID()), + raftTimer: s, + watchable: s.Watchable(), + ag: s, + } +} + +var ( + // External test can read this with GetProgressReportInterval() + // and change this to a small value to finish fast with + // SetProgressReportInterval(). + progressReportInterval = 10 * time.Minute + progressReportIntervalMu sync.RWMutex +) + +func GetProgressReportInterval() time.Duration { + progressReportIntervalMu.RLock() + defer progressReportIntervalMu.RUnlock() + return progressReportInterval +} + +func SetProgressReportInterval(newTimeout time.Duration) { + progressReportIntervalMu.Lock() + defer progressReportIntervalMu.Unlock() + progressReportInterval = newTimeout +} + +const ( + // We send ctrl response inside the read loop. We do not want + // send to block read, but we still want ctrl response we sent to + // be serialized. Thus we use a buffered chan to solve the problem. + // A small buffer should be OK for most cases, since we expect the + // ctrl requests are infrequent. + ctrlStreamBufLen = 16 +) + +// serverWatchStream is an etcd server side stream. It receives requests +// from client side gRPC stream. It receives watch events from mvcc.WatchStream, +// and creates responses that forwarded to gRPC stream. +// It also forwards control message like watch created and canceled. +type serverWatchStream struct { + clusterID int64 + memberID int64 + raftTimer etcdserver.RaftTimer + + watchable mvcc.WatchableKV + + gRPCStream pb.Watch_WatchServer + watchStream mvcc.WatchStream + ctrlStream chan *pb.WatchResponse + + // mu protects progress, prevKV + mu sync.Mutex + // progress tracks the watchID that stream might need to send + // progress to. + // TODO: combine progress and prevKV into a single struct? + progress map[mvcc.WatchID]bool + prevKV map[mvcc.WatchID]bool + + // closec indicates the stream is closed. + closec chan struct{} + + // wg waits for the send loop to complete + wg sync.WaitGroup + + ag AuthGetter +} + +func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { + sws := serverWatchStream{ + clusterID: ws.clusterID, + memberID: ws.memberID, + raftTimer: ws.raftTimer, + + watchable: ws.watchable, + + gRPCStream: stream, + watchStream: ws.watchable.NewWatchStream(), + // chan for sending control response like watcher created and canceled. 
+ ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), + progress: make(map[mvcc.WatchID]bool), + prevKV: make(map[mvcc.WatchID]bool), + closec: make(chan struct{}), + + ag: ws.ag, + } + + sws.wg.Add(1) + go func() { + sws.sendLoop() + sws.wg.Done() + }() + + errc := make(chan error, 1) + // Ideally recvLoop would also use sws.wg to signal its completion + // but when stream.Context().Done() is closed, the stream's recv + // may continue to block since it uses a different context, leading to + // deadlock when calling sws.close(). + go func() { + if rerr := sws.recvLoop(); rerr != nil { + errc <- rerr + } + }() + select { + case err = <-errc: + close(sws.ctrlStream) + case <-stream.Context().Done(): + err = stream.Context().Err() + // the only server-side cancellation is noleader for now. + if err == context.Canceled { + err = rpctypes.ErrGRPCNoLeader + } + } + sws.close() + return err +} + +func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { + authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) + if err != nil { + return false + } + if authInfo == nil { + // if auth is enabled, IsRangePermitted() can cause an error + authInfo = &auth.AuthInfo{} + } + + return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil +} + +func (sws *serverWatchStream) recvLoop() error { + for { + req, err := sws.gRPCStream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + switch uv := req.RequestUnion.(type) { + case *pb.WatchRequest_CreateRequest: + if uv.CreateRequest == nil { + break + } + + creq := uv.CreateRequest + if len(creq.Key) == 0 { + // \x00 is the smallest key + creq.Key = []byte{0} + } + if len(creq.RangeEnd) == 0 { + // force nil since watchstream.Watch distinguishes + // between nil and []byte{} for single key / >= + creq.RangeEnd = nil + } + if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { + // support >= key queries + creq.RangeEnd = []byte{} + } + + if !sws.isWatchPermitted(creq) { + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: -1, + Canceled: true, + Created: true, + CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), + } + + select { + case sws.ctrlStream <- wr: + case <-sws.closec: + } + return nil + } + + filters := FiltersFromRequest(creq) + + wsrev := sws.watchStream.Rev() + rev := creq.StartRevision + if rev == 0 { + rev = wsrev + 1 + } + id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...) + if id != -1 { + sws.mu.Lock() + if creq.ProgressNotify { + sws.progress[id] = true + } + if creq.PrevKv { + sws.prevKV[id] = true + } + sws.mu.Unlock() + } + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(wsrev), + WatchId: int64(id), + Created: true, + Canceled: id == -1, + } + select { + case sws.ctrlStream <- wr: + case <-sws.closec: + return nil + } + case *pb.WatchRequest_CancelRequest: + if uv.CancelRequest != nil { + id := uv.CancelRequest.WatchId + err := sws.watchStream.Cancel(mvcc.WatchID(id)) + if err == nil { + sws.ctrlStream <- &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: id, + Canceled: true, + } + sws.mu.Lock() + delete(sws.progress, mvcc.WatchID(id)) + delete(sws.prevKV, mvcc.WatchID(id)) + sws.mu.Unlock() + } + } + default: + // we probably should not shutdown the entire stream when + // receive an valid command. + // so just do nothing instead. 
+ continue + } + } +} + +func (sws *serverWatchStream) sendLoop() { + // watch ids that are currently active + ids := make(map[mvcc.WatchID]struct{}) + // watch responses pending on a watch id creation message + pending := make(map[mvcc.WatchID][]*pb.WatchResponse) + + interval := GetProgressReportInterval() + progressTicker := time.NewTicker(interval) + + defer func() { + progressTicker.Stop() + // drain the chan to clean up pending events + for ws := range sws.watchStream.Chan() { + mvcc.ReportEventReceived(len(ws.Events)) + } + for _, wrs := range pending { + for _, ws := range wrs { + mvcc.ReportEventReceived(len(ws.Events)) + } + } + }() + + for { + select { + case wresp, ok := <-sws.watchStream.Chan(): + if !ok { + return + } + + // TODO: evs is []mvccpb.Event type + // either return []*mvccpb.Event from the mvcc package + // or define protocol buffer with []mvccpb.Event. + evs := wresp.Events + events := make([]*mvccpb.Event, len(evs)) + sws.mu.Lock() + needPrevKV := sws.prevKV[wresp.WatchID] + sws.mu.Unlock() + for i := range evs { + events[i] = &evs[i] + + if needPrevKV { + opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} + r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt) + if err == nil && len(r.KVs) != 0 { + events[i].PrevKv = &(r.KVs[0]) + } + } + } + + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(wresp.Revision), + WatchId: int64(wresp.WatchID), + Events: events, + CompactRevision: wresp.CompactRevision, + } + + if _, hasId := ids[wresp.WatchID]; !hasId { + // buffer if id not yet announced + wrs := append(pending[wresp.WatchID], wr) + pending[wresp.WatchID] = wrs + continue + } + + mvcc.ReportEventReceived(len(evs)) + if err := sws.gRPCStream.Send(wr); err != nil { + return + } + + sws.mu.Lock() + if len(evs) > 0 && sws.progress[wresp.WatchID] { + // elide next progress update if sent a key update + sws.progress[wresp.WatchID] = false + } + sws.mu.Unlock() + + case c, ok := <-sws.ctrlStream: + if !ok { + return + } + + if err := sws.gRPCStream.Send(c); err != nil { + return + } + + // track id creation + wid := mvcc.WatchID(c.WatchId) + if c.Canceled { + delete(ids, wid) + continue + } + if c.Created { + // flush buffered events + ids[wid] = struct{}{} + for _, v := range pending[wid] { + mvcc.ReportEventReceived(len(v.Events)) + if err := sws.gRPCStream.Send(v); err != nil { + return + } + } + delete(pending, wid) + } + case <-progressTicker.C: + sws.mu.Lock() + for id, ok := range sws.progress { + if ok { + sws.watchStream.RequestProgress(id) + } + sws.progress[id] = true + } + sws.mu.Unlock() + case <-sws.closec: + return + } + } +} + +func (sws *serverWatchStream) close() { + sws.watchStream.Close() + close(sws.closec) + sws.wg.Wait() +} + +func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(sws.clusterID), + MemberId: uint64(sws.memberID), + Revision: rev, + RaftTerm: sws.raftTimer.Term(), + } +} + +func filterNoDelete(e mvccpb.Event) bool { + return e.Type == mvccpb.DELETE +} + +func filterNoPut(e mvccpb.Event) bool { + return e.Type == mvccpb.PUT +} + +func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc { + filters := make([]mvcc.FilterFunc, 0, len(creq.Filters)) + for _, ft := range creq.Filters { + switch ft { + case pb.WatchCreateRequest_NOPUT: + filters = append(filters, filterNoPut) + case pb.WatchCreateRequest_NODELETE: + filters = append(filters, filterNoDelete) + default: + } + } + return filters +} diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go new file mode 100644 index 000000000..0be93c52b --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go @@ -0,0 +1,878 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "bytes" + "sort" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/mvccpb" + "github.com/coreos/etcd/pkg/types" + "github.com/gogo/protobuf/proto" + "golang.org/x/net/context" +) + +const ( + warnApplyDuration = 100 * time.Millisecond +) + +type applyResult struct { + resp proto.Message + err error + // physc signals the physical effect of the request has completed in addition + // to being logically reflected by the node. Currently only used for + // Compaction requests. + physc <-chan struct{} +} + +// applierV3 is the interface for processing V3 raft messages +type applierV3 interface { + Apply(r *pb.InternalRaftRequest) *applyResult + + Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) + Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) + DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) + Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) + + LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) + LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) + + Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) + + Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) + + AuthEnable() (*pb.AuthEnableResponse, error) + AuthDisable() (*pb.AuthDisableResponse, error) + + UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) +} + +type applierV3backend struct { + s *EtcdServer +} + +func (s 
*EtcdServer) newApplierV3() applierV3 { + return newAuthApplierV3( + s.AuthStore(), + newQuotaApplierV3(s, &applierV3backend{s}), + ) +} + +func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { + ar := &applyResult{} + + // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls + switch { + case r.Range != nil: + ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) + case r.Put != nil: + ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) + case r.DeleteRange != nil: + ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) + case r.Txn != nil: + ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) + case r.Compaction != nil: + ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction) + case r.LeaseGrant != nil: + ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) + case r.LeaseRevoke != nil: + ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) + case r.Alarm != nil: + ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) + case r.Authenticate != nil: + ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) + case r.AuthEnable != nil: + ar.resp, ar.err = a.s.applyV3.AuthEnable() + case r.AuthDisable != nil: + ar.resp, ar.err = a.s.applyV3.AuthDisable() + case r.AuthUserAdd != nil: + ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) + case r.AuthUserDelete != nil: + ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) + case r.AuthUserChangePassword != nil: + ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) + case r.AuthUserGrantRole != nil: + ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) + case r.AuthUserGet != nil: + ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) + case r.AuthUserRevokeRole != nil: + ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) + case r.AuthRoleAdd != nil: + ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) + case r.AuthRoleGrantPermission != nil: + ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) + case r.AuthRoleGet != nil: + ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) + case r.AuthRoleRevokePermission != nil: + ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) + case r.AuthRoleDelete != nil: + ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) + case r.AuthUserList != nil: + ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) + case r.AuthRoleList != nil: + ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) + default: + panic("not implemented") + } + return ar +} + +func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { + resp = &pb.PutResponse{} + resp.Header = &pb.ResponseHeader{} + + val, leaseID := p.Value, lease.LeaseID(p.Lease) + if txn == nil { + if leaseID != lease.NoLease { + if l := a.s.lessor.Lookup(leaseID); l == nil { + return nil, lease.ErrLeaseNotFound + } + } + txn = a.s.KV().Write() + defer txn.End() + } + + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, ErrKeyNotFound + } + } + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv { + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } + } + + resp.Header.Revision = txn.Put(p.Key, 
val, leaseID) + return resp, nil +} + +func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp := &pb.DeleteRangeResponse{} + resp.Header = &pb.ResponseHeader{} + + if txn == nil { + txn = a.s.kv.Write() + defer txn.End() + } + + if isGteRange(dr.RangeEnd) { + dr.RangeEnd = []byte{} + } + + if dr.PrevKv { + rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + if rr != nil { + for i := range rr.KVs { + resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + } + } + } + + resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) + return resp, nil +} + +func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + resp := &pb.RangeResponse{} + resp.Header = &pb.ResponseHeader{} + + if txn == nil { + txn = a.s.kv.Read() + defer txn.End() + } + + if isGteRange(r.RangeEnd) { + r.RangeEnd = []byte{} + } + + limit := r.Limit + if r.SortOrder != pb.RangeRequest_NONE || + r.MinModRevision != 0 || r.MaxModRevision != 0 || + r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { + // fetch everything; sort and truncate afterwards + limit = 0 + } + if limit > 0 { + // fetch one extra for 'more' flag + limit = limit + 1 + } + + ro := mvcc.RangeOptions{ + Limit: limit, + Rev: r.Revision, + Count: r.CountOnly, + } + + rr, err := txn.Range(r.Key, r.RangeEnd, ro) + if err != nil { + return nil, err + } + + if r.MaxModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } + pruneKVs(rr, f) + } + if r.MinModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } + pruneKVs(rr, f) + } + if r.MaxCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } + pruneKVs(rr, f) + } + if r.MinCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } + pruneKVs(rr, f) + } + + sortOrder := r.SortOrder + if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // sort ASCEND by default only when target is not 'KEY' + sortOrder = pb.RangeRequest_ASCEND + } + if sortOrder != pb.RangeRequest_NONE { + var sorter sort.Interface + switch { + case r.SortTarget == pb.RangeRequest_KEY: + sorter = &kvSortByKey{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VERSION: + sorter = &kvSortByVersion{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_CREATE: + sorter = &kvSortByCreate{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_MOD: + sorter = &kvSortByMod{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VALUE: + sorter = &kvSortByValue{&kvSort{rr.KVs}} + } + switch { + case sortOrder == pb.RangeRequest_ASCEND: + sort.Sort(sorter) + case sortOrder == pb.RangeRequest_DESCEND: + sort.Sort(sort.Reverse(sorter)) + } + } + + if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { + rr.KVs = rr.KVs[:r.Limit] + resp.More = true + } + + resp.Header.Revision = rr.Rev + resp.Count = int64(rr.Count) + for i := range rr.KVs { + if r.KeysOnly { + rr.KVs[i].Value = nil + } + resp.Kvs = append(resp.Kvs, &rr.KVs[i]) + } + return resp, nil +} + +func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { + isWrite := !isTxnReadonly(rt) + txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) + + reqs, ok := 
a.compareToOps(txn, rt) + if isWrite { + if err := a.checkRequestPut(txn, reqs); err != nil { + txn.End() + return nil, err + } + } + if err := checkRequestRange(txn, reqs); err != nil { + txn.End() + return nil, err + } + + resps := make([]*pb.ResponseOp, len(reqs)) + txnResp := &pb.TxnResponse{ + Responses: resps, + Succeeded: ok, + Header: &pb.ResponseHeader{}, + } + + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txn. + if isWrite { + txn.End() + txn = a.s.KV().Write() + } + for i := range reqs { + resps[i] = a.applyUnion(txn, reqs[i]) + } + rev := txn.Rev() + if len(txn.Changes()) != 0 { + rev++ + } + txn.End() + + txnResp.Header.Revision = rev + return txnResp, nil +} + +func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { + for _, c := range rt.Compare { + if !applyCompare(rv, c) { + return rt.Failure, false + } + } + return rt.Success, true +} + +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. +func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return false + } + var ckv mvccpb.KeyValue + if len(rr.KVs) != 0 { + ckv = rr.KVs[0] + } else { + // Use the zero value of ckv normally. However... + if c.Target == pb.Compare_VALUE { + // Always fail if we're comparing a value on a key that doesn't exist. + // We can treat non-existence as the empty set explicitly, such that + // even a key with a value of length 0 bytes is still a real key + // that was written that way + return false + } + } + + // -1 is less, 0 is equal, 1 is greater + var result int + switch c.Target { + case pb.Compare_VALUE: + tv, _ := c.TargetUnion.(*pb.Compare_Value) + if tv != nil { + result = bytes.Compare(ckv.Value, tv.Value) + } + case pb.Compare_CREATE: + tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision) + if tv != nil { + result = compareInt64(ckv.CreateRevision, tv.CreateRevision) + } + + case pb.Compare_MOD: + tv, _ := c.TargetUnion.(*pb.Compare_ModRevision) + if tv != nil { + result = compareInt64(ckv.ModRevision, tv.ModRevision) + } + case pb.Compare_VERSION: + tv, _ := c.TargetUnion.(*pb.Compare_Version) + if tv != nil { + result = compareInt64(ckv.Version, tv.Version) + } + } + + switch c.Result { + case pb.Compare_EQUAL: + return result == 0 + case pb.Compare_NOT_EQUAL: + return result != 0 + case pb.Compare_GREATER: + return result > 0 + case pb.Compare_LESS: + return result < 0 + } + return true +} + +func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { + switch tv := union.Request.(type) { + case *pb.RequestOp_RequestRange: + if tv.RequestRange != nil { + resp, err := a.Range(txn, tv.RequestRange) + if err != nil { + plog.Panicf("unexpected error during txn: %v", err) + } + return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}} + } + case *pb.RequestOp_RequestPut: + if tv.RequestPut != nil { + resp, err := a.Put(txn, tv.RequestPut) + if err != nil { + plog.Panicf("unexpected error during txn: %v", err) + } + return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}} + } + case *pb.RequestOp_RequestDeleteRange: + if tv.RequestDeleteRange != nil { + resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) + if err != nil { + 
plog.Panicf("unexpected error during txn: %v", err) + } + return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}} + } + default: + // empty union + return nil + } + return nil + +} + +func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { + resp := &pb.CompactionResponse{} + resp.Header = &pb.ResponseHeader{} + ch, err := a.s.KV().Compact(compaction.Revision) + if err != nil { + return nil, ch, err + } + // get the current revision. which key to get is not important. + rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{}) + resp.Header.Revision = rr.Rev + return resp, ch, err +} + +func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) + resp := &pb.LeaseGrantResponse{} + if err == nil { + resp.ID = int64(l.ID) + resp.TTL = l.TTL() + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) + return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err +} + +func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp := &pb.AlarmResponse{} + oldCount := len(a.s.alarmStore.Get(ar.Alarm)) + + switch ar.Action { + case pb.AlarmRequest_GET: + resp.Alarms = a.s.alarmStore.Get(ar.Alarm) + case pb.AlarmRequest_ACTIVATE: + m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 + if !activated { + break + } + + switch m.Alarm { + case pb.AlarmType_NOSPACE: + plog.Warningf("alarm raised %+v", m) + a.s.applyV3 = newApplierV3Capped(a) + default: + plog.Errorf("unimplemented alarm activation (%+v)", m) + } + case pb.AlarmRequest_DEACTIVATE: + m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 + if !deactivated { + break + } + + switch m.Alarm { + case pb.AlarmType_NOSPACE: + plog.Infof("alarm disarmed %+v", ar) + a.s.applyV3 = a.s.newApplierV3() + default: + plog.Errorf("unimplemented alarm deactivation (%+v)", m) + } + default: + return nil, nil + } + return resp, nil +} + +type applierV3Capped struct { + applierV3 + q backendQuota +} + +// newApplierV3Capped creates an applyV3 that will reject Puts and transactions +// with Puts so that the number of keys in the store is capped. 
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } + +func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { + return nil, ErrNoSpace +} + +func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) { + if a.q.Cost(r) > 0 { + return nil, ErrNoSpace + } + return a.applierV3.Txn(r) +} + +func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, ErrNoSpace +} + +func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { + err := a.s.AuthStore().AuthEnable() + if err != nil { + return nil, err + } + return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil +} + +func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { + a.s.AuthStore().AuthDisable() + return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil +} + +func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { + ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) + resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := a.s.AuthStore().UserAdd(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := a.s.AuthStore().UserDelete(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + resp, err := a.s.AuthStore().UserChangePassword(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := a.s.AuthStore().UserGrantRole(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := a.s.AuthStore().UserGet(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := a.s.AuthStore().UserRevokeRole(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := a.s.AuthStore().RoleAdd(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := a.s.AuthStore().RoleGrantPermission(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := a.s.AuthStore().RoleGet(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err 
:= a.s.AuthStore().RoleRevokePermission(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := a.s.AuthStore().RoleDelete(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := a.s.AuthStore().UserList(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := a.s.AuthStore().RoleList(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+type quotaApplierV3 struct {
+ applierV3
+ q Quota
+}
+
+func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
+ return &quotaApplierV3{app, NewBackendQuota(s)}
+}
+
+func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
+ ok := a.q.Available(p)
+ resp, err := a.applierV3.Put(txn, p)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, err
+}
+
+func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+ ok := a.q.Available(rt)
+ resp, err := a.applierV3.Txn(rt)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, err
+}
+
+func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ ok := a.q.Available(lc)
+ resp, err := a.applierV3.LeaseGrant(lc)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, err
+}
+
+type kvSort struct{ kvs []mvccpb.KeyValue }
+
+func (s *kvSort) Swap(i, j int) {
+ t := s.kvs[i]
+ s.kvs[i] = s.kvs[j]
+ s.kvs[j] = t
+}
+func (s *kvSort) Len() int { return len(s.kvs) }
+
+type kvSortByKey struct{ *kvSort }
+
+func (s *kvSortByKey) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
+}
+
+type kvSortByVersion struct{ *kvSort }
+
+func (s *kvSortByVersion) Less(i, j int) bool {
+ return (s.kvs[i].Version - s.kvs[j].Version) < 0
+}
+
+type kvSortByCreate struct{ *kvSort }
+
+func (s *kvSortByCreate) Less(i, j int) bool {
+ return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
+}
+
+type kvSortByMod struct{ *kvSort }
+
+func (s *kvSortByMod) Less(i, j int) bool {
+ return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
+}
+
+type kvSortByValue struct{ *kvSort }
+
+func (s *kvSortByValue) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
+}
+
+func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
+ for _, requ := range reqs {
+ tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
+ if !ok {
+ continue
+ }
+ preq := tv.RequestPut
+ if preq == nil {
+ continue
+ }
+ if preq.IgnoreValue || preq.IgnoreLease {
+ // expects previous key-value, error if not exist
+ rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
+ if err != nil {
+ return err
+ }
+ if rr == nil || len(rr.KVs) == 0 {
+ return ErrKeyNotFound
+ }
+ }
+ if lease.LeaseID(preq.Lease) == lease.NoLease {
+ continue
+ }
+ if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil {
+ return lease.ErrLeaseNotFound
+ }
+ }
+ return nil
+}
+
+func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
+ for _, requ := range reqs {
+ tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
+ if !ok {
+ continue
+ }
+ greq := tv.RequestRange
+ if greq == nil || greq.Revision == 0 {
+ continue
+ }
+
+ 
if greq.Revision > rv.Rev() { + return mvcc.ErrFutureRev + } + if greq.Revision < rv.FirstRev() { + return mvcc.ErrCompacted + } + } + return nil +} + +func compareInt64(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +// isGteRange determines if the range end is a >= range. This works around grpc +// sending empty byte strings as nil; >= is encoded in the range end as '\0'. +func isGteRange(rangeEnd []byte) bool { + return len(rangeEnd) == 1 && rangeEnd[0] == 0 +} + +func noSideEffect(r *pb.InternalRaftRequest) bool { + return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil +} + +func removeNeedlessRangeReqs(txn *pb.TxnRequest) { + f := func(ops []*pb.RequestOp) []*pb.RequestOp { + j := 0 + for i := 0; i < len(ops); i++ { + if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { + continue + } + ops[j] = ops[i] + j++ + } + + return ops[:j] + } + + txn.Success = f(txn.Success) + txn.Failure = f(txn.Failure) +} + +func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { + j := 0 + for i := range rr.KVs { + rr.KVs[j] = rr.KVs[i] + if !isPrunable(&rr.KVs[i]) { + j++ + } + } + rr.KVs = rr.KVs[:j] +} + +func newHeader(s *EtcdServer) *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(s.Cluster().ID()), + MemberId: uint64(s.ID()), + Revision: s.KV().Rev(), + RaftTerm: s.Term(), + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go new file mode 100644 index 000000000..7da4ae45d --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -0,0 +1,196 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "sync" + + "github.com/coreos/etcd/auth" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" +) + +type authApplierV3 struct { + applierV3 + as auth.AuthStore + + // mu serializes Apply so that user isn't corrupted and so that + // serialized requests don't leak data from TOCTOU errors + mu sync.Mutex + + authInfo auth.AuthInfo +} + +func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 { + return &authApplierV3{applierV3: base, as: as} +} + +func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { + aa.mu.Lock() + defer aa.mu.Unlock() + if r.Header != nil { + // backward-compatible with pre-3.0 releases when internalRaftRequest + // does not have header field + aa.authInfo.Username = r.Header.Username + aa.authInfo.Revision = r.Header.AuthRevision + } + if needAdminPermission(r) { + if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return &applyResult{err: err} + } + } + ret := aa.applierV3.Apply(r) + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return ret +} + +func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { + if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { + return nil, err + } + if r.PrevKv { + err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) + if err != nil { + return nil, err + } + } + return aa.applierV3.Put(txn, r) +} + +func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { + return nil, err + } + return aa.applierV3.Range(txn, r) +} + +func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { + return nil, err + } + if r.PrevKv { + err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd) + if err != nil { + return nil, err + } + } + + return aa.applierV3.DeleteRange(txn, r) +} + +func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { + for _, requ := range reqs { + switch tv := requ.Request.(type) { + case *pb.RequestOp_RequestRange: + if tv.RequestRange == nil { + continue + } + + if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { + return err + } + + case *pb.RequestOp_RequestPut: + if tv.RequestPut == nil { + continue + } + + if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { + return err + } + + case *pb.RequestOp_RequestDeleteRange: + if tv.RequestDeleteRange == nil { + continue + } + + if tv.RequestDeleteRange.PrevKv { + err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + + err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + } + + return nil +} + +func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { + for _, c := range rt.Compare { + if err := as.IsRangePermitted(ai, c.Key, nil); err != nil { + return err + } + } + if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { + return err + } + if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil { + return err + } + return nil +} + +func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, 
error) { + if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil { + return nil, err + } + return aa.applierV3.Txn(rt) +} + +func needAdminPermission(r *pb.InternalRaftRequest) bool { + switch { + case r.AuthEnable != nil: + return true + case r.AuthDisable != nil: + return true + case r.AuthUserAdd != nil: + return true + case r.AuthUserDelete != nil: + return true + case r.AuthUserChangePassword != nil: + return true + case r.AuthUserGrantRole != nil: + return true + case r.AuthUserGet != nil: + return true + case r.AuthUserRevokeRole != nil: + return true + case r.AuthRoleAdd != nil: + return true + case r.AuthRoleGrantPermission != nil: + return true + case r.AuthRoleGet != nil: + return true + case r.AuthRoleRevokePermission != nil: + return true + case r.AuthRoleDelete != nil: + return true + case r.AuthUserList != nil: + return true + case r.AuthRoleList != nil: + return true + default: + return false + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go new file mode 100644 index 000000000..f278efca8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go @@ -0,0 +1,140 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "encoding/json" + "path" + "time" + + "github.com/coreos/etcd/etcdserver/api" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/store" + "github.com/coreos/go-semver/semver" +) + +// ApplierV2 is the interface for processing V2 raft messages +type ApplierV2 interface { + Delete(r *pb.Request) Response + Post(r *pb.Request) Response + Put(r *pb.Request) Response + QGet(r *pb.Request) Response + Sync(r *pb.Request) Response +} + +func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 { + return &applierV2store{store: s, cluster: c} +} + +type applierV2store struct { + store store.Store + cluster *membership.RaftCluster +} + +func (a *applierV2store) Delete(r *pb.Request) Response { + switch { + case r.PrevIndex > 0 || r.PrevValue != "": + return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex)) + default: + return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive)) + } +} + +func (a *applierV2store) Post(r *pb.Request) Response { + return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r))) +} + +func (a *applierV2store) Put(r *pb.Request) Response { + ttlOptions := toTTLOptions(r) + exists, existsSet := pbutil.GetBool(r.PrevExist) + switch { + case existsSet: + if exists { + if r.PrevIndex == 0 && r.PrevValue == "" { + return toResponse(a.store.Update(r.Path, r.Val, ttlOptions)) + } + return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) + } + return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions)) + case r.PrevIndex > 0 || r.PrevValue != "": + return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) + default: + if storeMemberAttributeRegexp.MatchString(r.Path) { + id := membership.MustParseMemberIDFromKey(path.Dir(r.Path)) + var attr membership.Attributes + if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { + plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) + } + if a.cluster != nil { + a.cluster.UpdateAttributes(id, attr) + } + // return an empty response since there is no consumer. + return Response{} + } + if r.Path == membership.StoreClusterVersionKey() { + if a.cluster != nil { + a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability) + } + // return an empty response since there is no consumer. 
+ return Response{} + } + return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions)) + } +} + +func (a *applierV2store) QGet(r *pb.Request) Response { + return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted)) +} + +func (a *applierV2store) Sync(r *pb.Request) Response { + a.store.DeleteExpiredKeys(time.Unix(0, r.Time)) + return Response{} +} + +// applyV2Request interprets r as a call to store.X and returns a Response interpreted +// from store.Event +func (s *EtcdServer) applyV2Request(r *pb.Request) Response { + toTTLOptions(r) + switch r.Method { + case "POST": + return s.applyV2.Post(r) + case "PUT": + return s.applyV2.Put(r) + case "DELETE": + return s.applyV2.Delete(r) + case "QGET": + return s.applyV2.QGet(r) + case "SYNC": + return s.applyV2.Sync(r) + default: + // This should never be reached, but just in case: + return Response{err: ErrUnknownMethod} + } +} + +func toTTLOptions(r *pb.Request) store.TTLOptionSet { + refresh, _ := pbutil.GetBool(r.Refresh) + ttlOptions := store.TTLOptionSet{Refresh: refresh} + if r.Expiration != 0 { + ttlOptions.ExpireTime = time.Unix(0, r.Expiration) + } + return ttlOptions +} + +func toResponse(ev *store.Event, err error) Response { + return Response{Event: ev, err: err} +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go new file mode 100644 index 000000000..c5e2dabf3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/backend.go @@ -0,0 +1,81 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "os" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +func newBackend(cfg *ServerConfig) backend.Backend { + bcfg := backend.DefaultBackendConfig() + bcfg.Path = cfg.backendPath() + if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { + // permit 10% excess over quota for disarm + bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) + } + return backend.New(bcfg) +} + +// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { + snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) + if err != nil { + return nil, fmt.Errorf("database snapshot file path error: %v", err) + } + if err := os.Rename(snapPath, cfg.backendPath()); err != nil { + return nil, fmt.Errorf("rename snapshot file error: %v", err) + } + return openBackend(cfg), nil +} + +// openBackend returns a backend using the current etcd db. 
+func openBackend(cfg *ServerConfig) backend.Backend { + fn := cfg.backendPath() + beOpened := make(chan backend.Backend) + go func() { + beOpened <- newBackend(cfg) + }() + select { + case be := <-beOpened: + return be + case <-time.After(time.Second): + plog.Warningf("another etcd process is using %q and holds the file lock.", fn) + plog.Warningf("waiting for it to exit before starting...") + } + return <-beOpened +} + +// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes +// before updating the backend db after persisting raft snapshot to disk, +// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this +// case, replace the db with the snapshot db sent by the leader. +func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { + var cIndex consistentIndex + kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) + defer kv.Close() + if snapshot.Metadata.Index <= kv.ConsistentIndex() { + return oldbe, nil + } + oldbe.Close() + return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go new file mode 100644 index 000000000..f44862a46 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -0,0 +1,258 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "sort" + "time" + + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/version" + "github.com/coreos/go-semver/semver" +) + +// isMemberBootstrapped tries to check if the given member has been bootstrapped +// in the given cluster. +func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { + rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt) + if err != nil { + return false + } + id := cl.MemberByName(member).ID + m := rcl.Member(id) + if m == nil { + return false + } + if len(m.ClientURLs) > 0 { + return true + } + return false +} + +// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and +// attempts to construct a Cluster by accessing the members endpoint on one of +// these URLs. The first URL to provide a response is used. If no URLs provide +// a response, or a Cluster cannot be successfully created from a received +// response, an error is returned. +// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, +// 10 second is enough for building connection and finishing request. +func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { + return getClusterFromRemotePeers(urls, 10*time.Second, true, rt) +} + +// If logerr is true, it prints out more error messages. 
+func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { + cc := &http.Client{ + Transport: rt, + Timeout: timeout, + } + for _, u := range urls { + resp, err := cc.Get(u + "/members") + if err != nil { + if logerr { + plog.Warningf("could not get cluster response from %s: %v", u, err) + } + continue + } + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + if logerr { + plog.Warningf("could not read the body of cluster response: %v", err) + } + continue + } + var membs []*membership.Member + if err = json.Unmarshal(b, &membs); err != nil { + if logerr { + plog.Warningf("could not unmarshal cluster response: %v", err) + } + continue + } + id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) + if err != nil { + if logerr { + plog.Warningf("could not parse the cluster ID from cluster res: %v", err) + } + continue + } + + // check the length of membership members + // if the membership members are present then prepare and return raft cluster + // if membership members are not present then the raft cluster formed will be + // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error + if len(membs) > 0 { + return membership.NewClusterFromMembers("", id, membs), nil + } + + return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.") + } + return nil, fmt.Errorf("could not retrieve cluster information from the given urls") +} + +// getRemotePeerURLs returns peer urls of remote members in the cluster. The +// returned list is sorted in ascending lexicographical order. +func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { + us := make([]string, 0) + for _, m := range cl.Members() { + if m.Name == local { + continue + } + us = append(us, m.PeerURLs...) + } + sort.Strings(us) + return us +} + +// getVersions returns the versions of the members in the given cluster. +// The key of the returned map is the member's ID. The value of the returned map +// is the semver versions string, including server and cluster. +// If it fails to get the version of a member, the key will be nil. +func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { + members := cl.Members() + vers := make(map[string]*version.Versions) + for _, m := range members { + if m.ID == local { + cv := "not_decided" + if cl.Version() != nil { + cv = cl.Version().String() + } + vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} + continue + } + ver, err := getVersion(m, rt) + if err != nil { + plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) + vers[m.ID.String()] = nil + } else { + vers[m.ID.String()] = ver + } + } + return vers +} + +// decideClusterVersion decides the cluster version based on the versions map. +// The returned version is the min server version in the map, or nil if the min +// version in unknown. 
+func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { + var cv *semver.Version + lv := semver.Must(semver.NewVersion(version.Version)) + + for mid, ver := range vers { + if ver == nil { + return nil + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + plog.Errorf("cannot understand the version of member %s (%v)", mid, err) + return nil + } + if lv.LessThan(*v) { + plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) + plog.Warningf("member %s has a higher version %s", mid, ver.Server) + } + if cv == nil { + cv = v + } else if v.LessThan(*cv) { + cv = v + } + } + return cv +} + +// isCompatibleWithCluster return true if the local member has a compatible version with +// the current running cluster. +// The version is considered as compatible when at least one of the other members in the cluster has a +// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version +// out of the range. +// We set this rule since when the local member joins, another member might be offline. +func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { + vers := getVersions(cl, local, rt) + minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) + maxV := semver.Must(semver.NewVersion(version.Version)) + maxV = &semver.Version{ + Major: maxV.Major, + Minor: maxV.Minor, + } + + return isCompatibleWithVers(vers, local, minV, maxV) +} + +func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { + var ok bool + for id, v := range vers { + // ignore comparison with local version + if id == local.String() { + continue + } + if v == nil { + continue + } + clusterv, err := semver.NewVersion(v.Cluster) + if err != nil { + plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) + continue + } + if clusterv.LessThan(*minV) { + plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) + return false + } + if maxV.LessThan(*clusterv) { + plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) + return false + } + ok = true + } + return ok +} + +// getVersion returns the Versions of the given member via its +// peerURLs. Returns the last error if it fails to get the version. 
+func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { + cc := &http.Client{ + Transport: rt, + } + var ( + err error + resp *http.Response + ) + + for _, u := range m.PeerURLs { + resp, err = cc.Get(u + "/version") + if err != nil { + plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) + continue + } + var b []byte + b, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) + continue + } + var vers version.Versions + if err = json.Unmarshal(b, &vers); err != nil { + plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) + continue + } + return &vers, nil + } + return nil, err +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go new file mode 100644 index 000000000..9c258934a --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -0,0 +1,204 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/coreos/etcd/pkg/netutil" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" +) + +// ServerConfig holds the configuration of etcd as taken from the command line or discovery. +type ServerConfig struct { + Name string + DiscoveryURL string + DiscoveryProxy string + ClientURLs types.URLs + PeerURLs types.URLs + DataDir string + // DedicatedWALDir config will make the etcd to write the WAL to the WALDir + // rather than the dataDir/member/wal. + DedicatedWALDir string + SnapCount uint64 + MaxSnapFiles uint + MaxWALFiles uint + InitialPeerURLsMap types.URLsMap + InitialClusterToken string + NewCluster bool + ForceNewCluster bool + PeerTLSInfo transport.TLSInfo + + TickMs uint + ElectionTicks int + BootstrapTimeout time.Duration + + AutoCompactionRetention int + QuotaBackendBytes int64 + + StrictReconfigCheck bool + + // ClientCertAuthEnabled is true when cert has been signed by the client CA. + ClientCertAuthEnabled bool + + AuthToken string +} + +// VerifyBootstrap sanity-checks the initial config for bootstrap case +// and returns an error for things that should never happen. 
+func (c *ServerConfig) VerifyBootstrap() error { + if err := c.hasLocalMember(); err != nil { + return err + } + if err := c.advertiseMatchesCluster(); err != nil { + return err + } + if checkDuplicateURL(c.InitialPeerURLsMap) { + return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) + } + if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" { + return fmt.Errorf("initial cluster unset and no discovery URL found") + } + return nil +} + +// VerifyJoinExisting sanity-checks the initial config for join existing cluster +// case and returns an error for things that should never happen. +func (c *ServerConfig) VerifyJoinExisting() error { + // The member has announced its peer urls to the cluster before starting; no need to + // set the configuration again. + if err := c.hasLocalMember(); err != nil { + return err + } + if checkDuplicateURL(c.InitialPeerURLsMap) { + return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) + } + if c.DiscoveryURL != "" { + return fmt.Errorf("discovery URL should not be set when joining existing initial cluster") + } + return nil +} + +// hasLocalMember checks that the cluster at least contains the local server. +func (c *ServerConfig) hasLocalMember() error { + if urls := c.InitialPeerURLsMap[c.Name]; urls == nil { + return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name) + } + return nil +} + +// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list. +func (c *ServerConfig) advertiseMatchesCluster() error { + urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice() + urls.Sort() + sort.Strings(apurls) + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) { + umap := map[string]types.URLs{c.Name: c.PeerURLs} + return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ",")) + } + return nil +} + +func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") } + +func (c *ServerConfig) WALDir() string { + if c.DedicatedWALDir != "" { + return c.DedicatedWALDir + } + return filepath.Join(c.MemberDir(), "wal") +} + +func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") } + +func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } + +// ReqTimeout returns timeout for request to finish. 
+func (c *ServerConfig) ReqTimeout() time.Duration { + // 5s for queue waiting, computation and disk IO delay + // + 2 * election timeout for possible leader election + return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond +} + +func (c *ServerConfig) electionTimeout() time.Duration { + return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond +} + +func (c *ServerConfig) peerDialTimeout() time.Duration { + // 1s for queue wait and system delay + // + one RTT, which is smaller than 1/5 election timeout + return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5 +} + +func (c *ServerConfig) PrintWithInitial() { c.print(true) } + +func (c *ServerConfig) Print() { c.print(false) } + +func (c *ServerConfig) print(initial bool) { + plog.Infof("name = %s", c.Name) + if c.ForceNewCluster { + plog.Infof("force new cluster") + } + plog.Infof("data dir = %s", c.DataDir) + plog.Infof("member dir = %s", c.MemberDir()) + if c.DedicatedWALDir != "" { + plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) + } + plog.Infof("heartbeat = %dms", c.TickMs) + plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) + plog.Infof("snapshot count = %d", c.SnapCount) + if len(c.DiscoveryURL) != 0 { + plog.Infof("discovery URL= %s", c.DiscoveryURL) + if len(c.DiscoveryProxy) != 0 { + plog.Infof("discovery proxy = %s", c.DiscoveryProxy) + } + } + plog.Infof("advertise client URLs = %s", c.ClientURLs) + if initial { + plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) + plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) + } +} + +func checkDuplicateURL(urlsmap types.URLsMap) bool { + um := make(map[string]bool) + for _, urls := range urlsmap { + for _, url := range urls { + u := url.String() + if um[u] { + return true + } + um[u] = true + } + } + return false +} + +func (c *ServerConfig) bootstrapTimeout() time.Duration { + if c.BootstrapTimeout != 0 { + return c.BootstrapTimeout + } + return time.Second +} + +func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go new file mode 100644 index 000000000..d513f6708 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go @@ -0,0 +1,33 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "sync/atomic" +) + +// consistentIndex represents the offset of an entry in a consistent replica log. +// It implements the mvcc.ConsistentIndexGetter interface. +// It is always set to the offset of current entry before executing the entry, +// so ConsistentWatchableKV could get the consistent index from it. 
+type consistentIndex uint64 + +func (i *consistentIndex) setConsistentIndex(v uint64) { + atomic.StoreUint64((*uint64)(i), v) +} + +func (i *consistentIndex) ConsistentIndex() uint64 { + return atomic.LoadUint64((*uint64)(i)) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/github.com/coreos/etcd/etcdserver/doc.go new file mode 100644 index 000000000..b195d2d16 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package etcdserver defines how etcd servers interact and store their states. +package etcdserver diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go new file mode 100644 index 000000000..ed749dbe8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/errors.go @@ -0,0 +1,46 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
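// Illustrative, stand-alone sketch (not part of the vendored etcd file) of the
// pattern consistentIndex above relies on: a uint64 that is only ever read and
// written through sync/atomic, so the apply path can record "the index of the
// entry currently being executed" without any extra locking.
package main

import (
	"fmt"
	"sync/atomic"
)

type index uint64

func (i *index) set(v uint64) { atomic.StoreUint64((*uint64)(i), v) }
func (i *index) get() uint64  { return atomic.LoadUint64((*uint64)(i)) }

func main() {
	var ci index
	ci.set(42) // e.g. recorded just before applying entry 42
	fmt.Println(ci.get())
}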
+ +package etcdserver + +import ( + "errors" + "fmt" +) + +var ( + ErrUnknownMethod = errors.New("etcdserver: unknown method") + ErrStopped = errors.New("etcdserver: server stopped") + ErrCanceled = errors.New("etcdserver: request cancelled") + ErrTimeout = errors.New("etcdserver: request timed out") + ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") + ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") + ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") + ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") + ErrNoLeader = errors.New("etcdserver: no leader") + ErrRequestTooLarge = errors.New("etcdserver: request is too large") + ErrNoSpace = errors.New("etcdserver: no space") + ErrTooManyRequests = errors.New("etcdserver: too many requests") + ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") + ErrKeyNotFound = errors.New("etcdserver: key not found") +) + +type DiscoveryError struct { + Op string + Err error +} + +func (e DiscoveryError) Error() string { + return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go new file mode 100644 index 000000000..90bbd3632 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go @@ -0,0 +1,102 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "time" + + "github.com/coreos/etcd/pkg/runtime" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "has_leader", + Help: "Whether or not a leader exists. 
1 is existence, 0 is not.", + }) + leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "leader_changes_seen_total", + Help: "The number of leader changes seen.", + }) + proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_committed_total", + Help: "The total number of consensus proposals committed.", + }) + proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_applied_total", + Help: "The total number of consensus proposals applied.", + }) + proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_pending", + Help: "The current number of pending proposals to commit.", + }) + proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "proposals_failed_total", + Help: "The total number of failed proposals seen.", + }) + leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "lease_expired_total", + Help: "The total number of expired leases.", + }) +) + +func init() { + prometheus.MustRegister(hasLeader) + prometheus.MustRegister(leaderChanges) + prometheus.MustRegister(proposalsCommitted) + prometheus.MustRegister(proposalsApplied) + prometheus.MustRegister(proposalsPending) + prometheus.MustRegister(proposalsFailed) + prometheus.MustRegister(leaseExpired) +} + +func monitorFileDescriptor(done <-chan struct{}) { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + used, err := runtime.FDUsage() + if err != nil { + plog.Errorf("cannot monitor file descriptor usage (%v)", err) + return + } + limit, err := runtime.FDLimit() + if err != nil { + plog.Errorf("cannot monitor file descriptor usage (%v)", err) + return + } + if used >= limit/5*4 { + plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit) + } + select { + case <-ticker.C: + case <-done: + return + } + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go new file mode 100644 index 000000000..87126f156 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go @@ -0,0 +1,121 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +const ( + // DefaultQuotaBytes is the number of bytes the backend Size may + // consume before exceeding the space quota. + DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB + // MaxQuotaBytes is the maximum number of bytes suggested for a backend + // quota. A larger quota may lead to degraded performance. + MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB +) + +// Quota represents an arbitrary quota against arbitrary requests. 
Each request +// costs some charge; if there is not enough remaining charge, then there are +// too few resources available within the quota to apply the request. +type Quota interface { + // Available judges whether the given request fits within the quota. + Available(req interface{}) bool + // Cost computes the charge against the quota for a given request. + Cost(req interface{}) int + // Remaining is the amount of charge left for the quota. + Remaining() int64 +} + +type passthroughQuota struct{} + +func (*passthroughQuota) Available(interface{}) bool { return true } +func (*passthroughQuota) Cost(interface{}) int { return 0 } +func (*passthroughQuota) Remaining() int64 { return 1 } + +type backendQuota struct { + s *EtcdServer + maxBackendBytes int64 +} + +const ( + // leaseOverhead is an estimate for the cost of storing a lease + leaseOverhead = 64 + // kvOverhead is an estimate for the cost of storing a key's metadata + kvOverhead = 256 +) + +func NewBackendQuota(s *EtcdServer) Quota { + if s.Cfg.QuotaBackendBytes < 0 { + // disable quotas if negative + plog.Warningf("disabling backend quota") + return &passthroughQuota{} + } + if s.Cfg.QuotaBackendBytes == 0 { + // use default size if no quota size given + return &backendQuota{s, DefaultQuotaBytes} + } + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) + } + return &backendQuota{s, s.Cfg.QuotaBackendBytes} +} + +func (b *backendQuota) Available(v interface{}) bool { + // TODO: maybe optimize backend.Size() + return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes +} + +func (b *backendQuota) Cost(v interface{}) int { + switch r := v.(type) { + case *pb.PutRequest: + return costPut(r) + case *pb.TxnRequest: + return costTxn(r) + case *pb.LeaseGrantRequest: + return leaseOverhead + default: + panic("unexpected cost") + } +} + +func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) } + +func costTxnReq(u *pb.RequestOp) int { + r := u.GetRequestPut() + if r == nil { + return 0 + } + return costPut(r) +} + +func costTxn(r *pb.TxnRequest) int { + sizeSuccess := 0 + for _, u := range r.Success { + sizeSuccess += costTxnReq(u) + } + sizeFailure := 0 + for _, u := range r.Failure { + sizeFailure += costTxnReq(u) + } + if sizeFailure > sizeSuccess { + return sizeFailure + } + return sizeSuccess +} + +func (b *backendQuota) Remaining() int64 { + return b.maxBackendBytes - b.s.Backend().Size() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go new file mode 100644 index 000000000..dcb894f82 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go @@ -0,0 +1,594 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
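// Illustrative, stand-alone sketch (not part of the vendored etcd file) of the
// cost arithmetic used by costPut and costTxn above: a put is charged the
// 256-byte kvOverhead estimate plus key and value length, and a transaction is
// charged whichever of its success/failure branches would be more expensive.
// The request shapes below are simplified stand-ins for the etcdserverpb types.
package main

import "fmt"

const kvOverhead = 256 // estimated metadata cost per key, as in quota.go above

func costPut(key, value []byte) int { return kvOverhead + len(key) + len(value) }

func sum(xs []int) int {
	t := 0
	for _, x := range xs {
		t += x
	}
	return t
}

// costTxn charges a transaction for whichever branch would be more expensive.
func costTxn(success, failure []int) int {
	s, f := sum(success), sum(failure)
	if f > s {
		return f
	}
	return s
}

func main() {
	p := costPut([]byte("foo"), make([]byte, 100)) // 256 + 3 + 100 = 359
	fmt.Println(p)
	fmt.Println(costTxn([]int{p, p}, []int{p})) // max(718, 359) = 718
}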
+ +package etcdserver + +import ( + "encoding/json" + "expvar" + "sort" + "sync" + "sync/atomic" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/contention" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/rafthttp" + "github.com/coreos/etcd/wal" + "github.com/coreos/etcd/wal/walpb" + "github.com/coreos/pkg/capnslog" +) + +const ( + // Number of entries for slow follower to catch-up after compacting + // the raft storage entries. + // We expect the follower has a millisecond level latency with the leader. + // The max throughput is around 10K. Keep a 5K entries is enough for helping + // follower to catch up. + numberOfCatchUpEntries = 5000 + + // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value). + // Assuming the RTT is around 10ms, 1MB max size is large enough. + maxSizePerMsg = 1 * 1024 * 1024 + // Never overflow the rafthttp buffer, which is 4096. + // TODO: a better const? + maxInflightMsgs = 4096 / 8 +) + +var ( + // protects raftStatus + raftStatusMu sync.Mutex + // indirection for expvar func interface + // expvar panics when publishing duplicate name + // expvar does not support remove a registered name + // so only register a func that calls raftStatus + // and change raftStatus as we need. + raftStatus func() raft.Status +) + +func init() { + raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft")) + expvar.Publish("raft.status", expvar.Func(func() interface{} { + raftStatusMu.Lock() + defer raftStatusMu.Unlock() + return raftStatus() + })) +} + +type RaftTimer interface { + Index() uint64 + Term() uint64 +} + +// apply contains entries, snapshot to be applied. Once +// an apply is consumed, the entries will be persisted to +// to raft storage concurrently; the application must read +// raftDone before assuming the raft messages are stable. +type apply struct { + entries []raftpb.Entry + snapshot raftpb.Snapshot + // notifyc synchronizes etcd server applies with the raft node + notifyc chan struct{} +} + +type raftNode struct { + // Cache of the latest raft index and raft term the server has seen. + // These three unit64 fields must be the first elements to keep 64-bit + // alignment for atomic access to the fields. + index uint64 + term uint64 + lead uint64 + + raftNodeConfig + + // a chan to send/receive snapshot + msgSnapC chan raftpb.Message + + // a chan to send out apply + applyc chan apply + + // a chan to send out readState + readStateC chan raft.ReadState + + // utility + ticker *time.Ticker + // contention detectors for raft heartbeat message + td *contention.TimeoutDetector + + stopped chan struct{} + done chan struct{} +} + +type raftNodeConfig struct { + // to check if msg receiver is removed from cluster + isIDRemoved func(id uint64) bool + raft.Node + raftStorage *raft.MemoryStorage + storage Storage + heartbeat time.Duration // for logging + // transport specifies the transport to send and receive msgs to members. + // Sending messages MUST NOT block. It is okay to drop messages, since + // clients should timeout and reissue their messages. + // If transport is nil, server will panic. + transport rafthttp.Transporter +} + +func newRaftNode(cfg raftNodeConfig) *raftNode { + r := &raftNode{ + raftNodeConfig: cfg, + // set up contention detectors for raft heartbeat message. 
+ // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * cfg.heartbeat), + readStateC: make(chan raft.ReadState, 1), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + applyc: make(chan apply), + stopped: make(chan struct{}), + done: make(chan struct{}), + } + if r.heartbeat == 0 { + r.ticker = &time.Ticker{} + } else { + r.ticker = time.NewTicker(r.heartbeat) + } + return r +} + +// start prepares and starts raftNode in a new goroutine. It is no longer safe +// to modify the fields after it has been started. +func (r *raftNode) start(rh *raftReadyHandler) { + internalTimeout := time.Second + + go func() { + defer r.onStop() + islead := false + + for { + select { + case <-r.ticker.C: + r.Tick() + case rd := <-r.Ready(): + if rd.SoftState != nil { + newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + if newLeader { + leaderChanges.Inc() + } + + if rd.SoftState.Lead == raft.None { + hasLeader.Set(0) + } else { + hasLeader.Set(1) + } + + atomic.StoreUint64(&r.lead, rd.SoftState.Lead) + islead = rd.RaftState == raft.StateLeader + rh.updateLeadership(newLeader) + r.td.Reset() + } + + if len(rd.ReadStates) != 0 { + select { + case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: + case <-time.After(internalTimeout): + plog.Warningf("timed out sending read state") + case <-r.stopped: + return + } + } + + notifyc := make(chan struct{}, 1) + ap := apply{ + entries: rd.CommittedEntries, + snapshot: rd.Snapshot, + notifyc: notifyc, + } + + updateCommittedIndex(&ap, rh) + + select { + case r.applyc <- ap: + case <-r.stopped: + return + } + + // the leader can write to its disk in parallel with replicating to the followers and them + // writing to their disks. + // For more details, check raft thesis 10.2.1 + if islead { + // gofail: var raftBeforeLeaderSend struct{} + r.transport.Send(r.processMessages(rd.Messages)) + } + + // gofail: var raftBeforeSave struct{} + if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { + plog.Fatalf("raft save state and entries error: %v", err) + } + if !raft.IsEmptyHardState(rd.HardState) { + proposalsCommitted.Set(float64(rd.HardState.Commit)) + } + // gofail: var raftAfterSave struct{} + + if !raft.IsEmptySnap(rd.Snapshot) { + // gofail: var raftBeforeSaveSnap struct{} + if err := r.storage.SaveSnap(rd.Snapshot); err != nil { + plog.Fatalf("raft save snapshot error: %v", err) + } + // etcdserver now claim the snapshot has been persisted onto the disk + notifyc <- struct{}{} + + // gofail: var raftAfterSaveSnap struct{} + r.raftStorage.ApplySnapshot(rd.Snapshot) + plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) + // gofail: var raftAfterApplySnap struct{} + } + + r.raftStorage.Append(rd.Entries) + + if !islead { + // finish processing incoming messages before we signal raftdone chan + msgs := r.processMessages(rd.Messages) + + // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots + notifyc <- struct{}{} + + // Candidate or follower needs to wait for all pending configuration + // changes to be applied before sending messages. + // Otherwise we might incorrectly count votes (e.g. votes from removed members). + // Also slow machine's follower raft-layer could proceed to become the leader + // on its own single-node cluster, before apply-layer applies the config change. + // We simply wait for ALL pending entries to be applied for now. 
+ // We might improve this later on if it causes unnecessary long blocking issues. + waitApply := false + for _, ent := range rd.CommittedEntries { + if ent.Type == raftpb.EntryConfChange { + waitApply = true + break + } + } + if waitApply { + // blocks until 'applyAll' calls 'applyWait.Trigger' + // to be in sync with scheduled config-change job + // (assume notifyc has cap of 1) + select { + case notifyc <- struct{}{}: + case <-r.stopped: + return + } + } + + // gofail: var raftBeforeFollowerSend struct{} + r.transport.Send(msgs) + } else { + // leader already processed 'MsgSnap' and signaled + notifyc <- struct{}{} + } + + r.Advance() + case <-r.stopped: + return + } + } + }() +} + +func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { + var ci uint64 + if len(ap.entries) != 0 { + ci = ap.entries[len(ap.entries)-1].Index + } + if ap.snapshot.Metadata.Index > ci { + ci = ap.snapshot.Metadata.Index + } + if ci != 0 { + rh.updateCommittedIndex(ci) + } +} + +func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { + sentAppResp := false + for i := len(ms) - 1; i >= 0; i-- { + if r.isIDRemoved(ms[i].To) { + ms[i].To = 0 + } + + if ms[i].Type == raftpb.MsgAppResp { + if sentAppResp { + ms[i].To = 0 + } else { + sentAppResp = true + } + } + + if ms[i].Type == raftpb.MsgSnap { + // There are two separate data store: the store for v2, and the KV for v3. + // The msgSnap only contains the most recent snapshot of store without KV. + // So we need to redirect the msgSnap to etcd server main loop for merging in the + // current store snapshot and KV snapshot. + select { + case r.msgSnapC <- ms[i]: + default: + // drop msgSnap if the inflight chan if full. + } + ms[i].To = 0 + } + if ms[i].Type == raftpb.MsgHeartbeat { + ok, exceed := r.td.Observe(ms[i].To) + if !ok { + // TODO: limit request rate. + plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed) + plog.Warningf("server is likely overloaded") + } + } + } + return ms +} + +func (r *raftNode) apply() chan apply { + return r.applyc +} + +func (r *raftNode) stop() { + r.stopped <- struct{}{} + <-r.done +} + +func (r *raftNode) onStop() { + r.Stop() + r.ticker.Stop() + r.transport.Stop() + if err := r.storage.Close(); err != nil { + plog.Panicf("raft close storage error: %v", err) + } + close(r.done) +} + +// for testing +func (r *raftNode) pauseSending() { + p := r.transport.(rafthttp.Pausable) + p.Pause() +} + +func (r *raftNode) resumeSending() { + p := r.transport.(rafthttp.Pausable) + p.Resume() +} + +// advanceTicksForElection advances ticks to the node for fast election. +// This reduces the time to wait for first leader election if bootstrapping the whole +// cluster, while leaving at least 1 heartbeat for possible existing leader +// to contact it. 
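// Illustrative, stand-alone sketch (not part of the vendored etcd file) of what
// processMessages above does with append responses: the outgoing batch is
// scanned in reverse and only the most recent MsgAppResp is kept, since older
// ones are superseded. The message type here is a toy stand-in for
// raftpb.Message.
package main

import "fmt"

type msg struct {
	Type string
	To   uint64 // To == 0 means "do not send"
}

func dedupAppResp(ms []msg) []msg {
	sent := false
	for i := len(ms) - 1; i >= 0; i-- {
		if ms[i].Type == "MsgAppResp" {
			if sent {
				ms[i].To = 0 // drop older append responses
			} else {
				sent = true
			}
		}
	}
	return ms
}

func main() {
	out := dedupAppResp([]msg{
		{Type: "MsgAppResp", To: 2},
		{Type: "MsgHeartbeatResp", To: 2},
		{Type: "MsgAppResp", To: 2},
	})
	fmt.Println(out) // only the last MsgAppResp keeps To != 0
}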
+func advanceTicksForElection(n raft.Node, electionTicks int) { + for i := 0; i < electionTicks-1; i++ { + n.Tick() + } +} + +func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { + var err error + member := cl.MemberByName(cfg.Name) + metadata := pbutil.MustMarshal( + &pb.Metadata{ + NodeID: uint64(member.ID), + ClusterID: uint64(cl.ID()), + }, + ) + if w, err = wal.Create(cfg.WALDir(), metadata); err != nil { + plog.Fatalf("create wal error: %v", err) + } + peers := make([]raft.Peer, len(ids)) + for i, id := range ids { + ctx, err := json.Marshal((*cl).Member(id)) + if err != nil { + plog.Panicf("marshal member should never fail: %v", err) + } + peers[i] = raft.Peer{ID: uint64(id), Context: ctx} + } + id = member.ID + plog.Infof("starting member %s in cluster %s", id, cl.ID()) + s = raft.NewMemoryStorage() + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + } + + n = raft.StartNode(c, peers) + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + advanceTicksForElection(n, c.ElectionTick) + return +} + +func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) + + plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) + cl := membership.NewCluster("") + cl.SetID(cid) + s := raft.NewMemoryStorage() + if snapshot != nil { + s.ApplySnapshot(*snapshot) + } + s.SetHardState(st) + s.Append(ents) + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + } + + n := raft.RestartNode(c) + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + advanceTicksForElection(n, c.ElectionTick) + return id, cl, n, s, w +} + +func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) + + // discard the previously uncommitted entries + for i, ent := range ents { + if ent.Index > st.Commit { + plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) + ents = ents[:i] + break + } + } + + // force append the configuration change entries + toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit) + ents = append(ents, toAppEnts...) 
+ + // force commit newly appended entries + err := w.Save(raftpb.HardState{}, toAppEnts) + if err != nil { + plog.Fatalf("%v", err) + } + if len(ents) != 0 { + st.Commit = ents[len(ents)-1].Index + } + + plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) + cl := membership.NewCluster("") + cl.SetID(cid) + s := raft.NewMemoryStorage() + if snapshot != nil { + s.ApplySnapshot(*snapshot) + } + s.SetHardState(st) + s.Append(ents) + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + } + n := raft.RestartNode(c) + raftStatus = n.Status + return id, cl, n, s, w +} + +// getIDs returns an ordered set of IDs included in the given snapshot and +// the entries. The given snapshot/entries can contain two kinds of +// ID-related entry: +// - ConfChangeAddNode, in which case the contained ID will be added into the set. +// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. +func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { + ids := make(map[uint64]bool) + if snap != nil { + for _, id := range snap.Metadata.ConfState.Nodes { + ids[id] = true + } + } + for _, e := range ents { + if e.Type != raftpb.EntryConfChange { + continue + } + var cc raftpb.ConfChange + pbutil.MustUnmarshal(&cc, e.Data) + switch cc.Type { + case raftpb.ConfChangeAddNode: + ids[cc.NodeID] = true + case raftpb.ConfChangeRemoveNode: + delete(ids, cc.NodeID) + case raftpb.ConfChangeUpdateNode: + // do nothing + default: + plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") + } + } + sids := make(types.Uint64Slice, 0, len(ids)) + for id := range ids { + sids = append(sids, id) + } + sort.Sort(sids) + return []uint64(sids) +} + +// createConfigChangeEnts creates a series of Raft entries (i.e. +// EntryConfChange) to remove the set of given IDs from the cluster. The ID +// `self` is _not_ removed, even if present in the set. +// If `self` is not inside the given ids, it creates a Raft entry to add a +// default member with the given `self`. 
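// Illustrative, stand-alone sketch (not part of the vendored etcd file) of the
// set bookkeeping getIDs above performs: a snapshot seeds the set, AddNode
// entries insert an ID, RemoveNode entries delete it (UpdateNode is ignored in
// the original), and the surviving IDs come back sorted. The conf-change type
// below is a toy stand-in for raftpb.ConfChange.
package main

import (
	"fmt"
	"sort"
)

type confChange struct {
	Add    bool // true = AddNode, false = RemoveNode
	NodeID uint64
}

func memberIDs(fromSnapshot []uint64, changes []confChange) []uint64 {
	ids := make(map[uint64]bool)
	for _, id := range fromSnapshot {
		ids[id] = true
	}
	for _, cc := range changes {
		if cc.Add {
			ids[cc.NodeID] = true
		} else {
			delete(ids, cc.NodeID)
		}
	}
	out := make([]uint64, 0, len(ids))
	for id := range ids {
		out = append(out, id)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	fmt.Println(memberIDs(
		[]uint64{1, 2},
		[]confChange{{Add: true, NodeID: 3}, {Add: false, NodeID: 1}},
	)) // [2 3]
}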
+func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { + ents := make([]raftpb.Entry, 0) + next := index + 1 + found := false + for _, id := range ids { + if id == self { + found = true + continue + } + cc := &raftpb.ConfChange{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), + Term: term, + Index: next, + } + ents = append(ents, e) + next++ + } + if !found { + m := membership.Member{ + ID: types.ID(self), + RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, + } + ctx, err := json.Marshal(m) + if err != nil { + plog.Panicf("marshal member should never fail: %v", err) + } + cc := &raftpb.ConfChange{ + Type: raftpb.ConfChangeAddNode, + NodeID: self, + Context: ctx, + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), + Term: term, + Index: next, + } + ents = append(ents, e) + } + return ents +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go new file mode 100644 index 000000000..38a96f719 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -0,0 +1,1659 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "encoding/json" + "expvar" + "fmt" + "math" + "math/rand" + "net/http" + "os" + "path" + "regexp" + "sync" + "sync/atomic" + "time" + + "github.com/coreos/etcd/alarm" + "github.com/coreos/etcd/auth" + "github.com/coreos/etcd/compactor" + "github.com/coreos/etcd/discovery" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/etcdserver/stats" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/pkg/idutil" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/runtime" + "github.com/coreos/etcd/pkg/schedule" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/pkg/wait" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/rafthttp" + "github.com/coreos/etcd/snap" + "github.com/coreos/etcd/store" + "github.com/coreos/etcd/version" + "github.com/coreos/etcd/wal" + "github.com/coreos/go-semver/semver" + "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" +) + +const ( + DefaultSnapCount = 100000 + + StoreClusterPrefix = "/0" + StoreKeysPrefix = "/1" + + // HealthInterval is the minimum time the cluster should be healthy + // before accepting add member requests. + HealthInterval = 5 * time.Second + + purgeFileInterval = 30 * time.Second + // monitorVersionInterval should be smaller than the timeout + // on the connection. 
Or we will not be able to reuse the connection + // (since it will timeout). + monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second + + // max number of in-flight snapshot messages etcdserver allows to have + // This number is more than enough for most clusters with 5 machines. + maxInFlightMsgSnap = 16 + + releaseDelayAfterSnapshot = 30 * time.Second + + // maxPendingRevokes is the maximum number of outstanding expired lease revocations. + maxPendingRevokes = 16 +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver") + + storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) +) + +func init() { + rand.Seed(time.Now().UnixNano()) + + expvar.Publish( + "file_descriptor_limit", + expvar.Func( + func() interface{} { + n, _ := runtime.FDLimit() + return n + }, + ), + ) +} + +type Response struct { + Event *store.Event + Watcher store.Watcher + err error +} + +type Server interface { + // Start performs any initialization of the Server necessary for it to + // begin serving requests. It must be called before Do or Process. + // Start must be non-blocking; any long-running server functionality + // should be implemented in goroutines. + Start() + // Stop terminates the Server and performs any necessary finalization. + // Do and Process cannot be called after Stop has been invoked. + Stop() + // ID returns the ID of the Server. + ID() types.ID + // Leader returns the ID of the leader Server. + Leader() types.ID + // Do takes a request and attempts to fulfill it, returning a Response. + Do(ctx context.Context, r pb.Request) (Response, error) + // Process takes a raft message and applies it to the server's raft state + // machine, respecting any timeout of the given context. + Process(ctx context.Context, m raftpb.Message) error + // AddMember attempts to add a member into the cluster. It will return + // ErrIDRemoved if member ID is removed from the cluster, or return + // ErrIDExists if member ID exists in the cluster. + AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) + // RemoveMember attempts to remove a member from the cluster. It will + // return ErrIDRemoved if member ID is removed from the cluster, or return + // ErrIDNotFound if member ID is not in the cluster. + RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) + + // UpdateMember attempts to update an existing member in the cluster. It will + // return ErrIDNotFound if the member ID does not exist. + UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) + + // ClusterVersion is the cluster-wide minimum major.minor version. + // Cluster version is set to the min version that an etcd member is + // compatible with when first bootstrap. + // + // ClusterVersion is nil until the cluster is bootstrapped (has a quorum). + // + // During a rolling upgrades, the ClusterVersion will be updated + // automatically after a sync. (5 second by default) + // + // The API/raft component can utilize ClusterVersion to determine if + // it can accept a client request or a raft RPC. + // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and + // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since + // this feature is introduced post 2.0. 
+ ClusterVersion() *semver.Version +} + +// EtcdServer is the production implementation of the Server interface +type EtcdServer struct { + // inflightSnapshots holds count the number of snapshots currently inflight. + inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. + appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. + committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. + // consistIndex used to hold the offset of current executing entry + // It is initialized to 0 before executing any entry. + consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. + Cfg *ServerConfig + + readych chan struct{} + r raftNode + + snapCount uint64 + + w wait.Wait + + readMu sync.RWMutex + // read routine notifies etcd server that it waits for reading by sending an empty struct to + // readwaitC + readwaitc chan struct{} + // readNotifier is used to notify the read routine that it can process the request + // when there is no error + readNotifier *notifier + + // stop signals the run goroutine should shutdown. + stop chan struct{} + // stopping is closed by run goroutine on shutdown. + stopping chan struct{} + // done is closed when all goroutines from start() complete. + done chan struct{} + + errorc chan error + id types.ID + attributes membership.Attributes + + cluster *membership.RaftCluster + + store store.Store + snapshotter *snap.Snapshotter + + applyV2 ApplierV2 + + // applyV3 is the applier with auth and quotas + applyV3 applierV3 + // applyV3Base is the core applier without auth or quotas + applyV3Base applierV3 + applyWait wait.WaitTime + + kv mvcc.ConsistentWatchableKV + lessor lease.Lessor + bemu sync.Mutex + be backend.Backend + authStore auth.AuthStore + alarmStore *alarm.AlarmStore + + stats *stats.ServerStats + lstats *stats.LeaderStats + + SyncTicker *time.Ticker + // compactor is used to auto-compact the KV. + compactor *compactor.Periodic + + // peerRt used to send requests (version, lease) to peers. + peerRt http.RoundTripper + reqIDGen *idutil.Generator + + // forceVersionC is used to force the version monitor loop + // to detect the cluster version immediately. + forceVersionC chan struct{} + + // wgMu blocks concurrent waitgroup mutation while server stopping + wgMu sync.RWMutex + // wg is used to wait for the go routines that depends on the server state + // to exit when stopping the server. + wg sync.WaitGroup + + // ctx is used for etcd-initiated requests that may need to be canceled + // on etcd server shutdown. + ctx context.Context + cancel context.CancelFunc + + leadTimeMu sync.RWMutex + leadElectedTime time.Time +} + +// NewServer creates a new EtcdServer from the supplied configuration. The +// configuration is considered static for the lifetime of the EtcdServer. 
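// Illustrative, stand-alone sketch (not part of the vendored etcd file) of why
// raftNode and EtcdServer above keep their atomically accessed uint64 counters
// as the first struct fields: on 32-bit platforms sync/atomic requires 64-bit
// operands to be 64-bit aligned, and only the first word of an allocated
// struct is guaranteed to have that alignment (see the sync/atomic docs).
package main

import (
	"fmt"
	"sync/atomic"
)

type counters struct {
	applied   uint64 // keep 64-bit fields first for atomic access on 386/arm
	committed uint64
	name      string
}

func main() {
	var c counters
	atomic.AddUint64(&c.applied, 1)
	atomic.StoreUint64(&c.committed, 10)
	fmt.Println(atomic.LoadUint64(&c.applied), atomic.LoadUint64(&c.committed))
}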
+func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { + st := store.New(StoreClusterPrefix, StoreKeysPrefix) + + var ( + w *wal.WAL + n raft.Node + s *raft.MemoryStorage + id types.ID + cl *membership.RaftCluster + ) + + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { + return nil, fmt.Errorf("cannot access data directory: %v", terr) + } + + haveWAL := wal.Exist(cfg.WALDir()) + + if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { + plog.Fatalf("create snapshot directory error: %v", err) + } + ss := snap.New(cfg.SnapDir()) + + bepath := cfg.backendPath() + beExist := fileutil.Exist(bepath) + be := openBackend(cfg) + + defer func() { + if err != nil { + be.Close() + } + }() + + prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout()) + if err != nil { + return nil, err + } + var ( + remotes []*membership.Member + snapshot *raftpb.Snapshot + ) + + switch { + case !haveWAL && !cfg.NewCluster: + if err = cfg.VerifyJoinExisting(); err != nil { + return nil, err + } + cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + if err != nil { + return nil, err + } + existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt) + if gerr != nil { + return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) + } + if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil { + return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) + } + if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) { + return nil, fmt.Errorf("incompatible with current running cluster") + } + + remotes = existingCluster.Members() + cl.SetID(existingCluster.ID()) + cl.SetStore(st) + cl.SetBackend(be) + cfg.Print() + id, n, s, w = startNode(cfg, cl, nil) + case !haveWAL && cfg.NewCluster: + if err = cfg.VerifyBootstrap(); err != nil { + return nil, err + } + cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + if err != nil { + return nil, err + } + m := cl.MemberByName(cfg.Name) + if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) { + return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) + } + if cfg.ShouldDiscover() { + var str string + str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) + if err != nil { + return nil, &DiscoveryError{Op: "join", Err: err} + } + var urlsmap types.URLsMap + urlsmap, err = types.NewURLsMap(str) + if err != nil { + return nil, err + } + if checkDuplicateURL(urlsmap) { + return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) + } + if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil { + return nil, err + } + } + cl.SetStore(st) + cl.SetBackend(be) + cfg.PrintWithInitial() + id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) + case haveWAL: + if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { + return nil, fmt.Errorf("cannot write to member directory: %v", err) + } + + if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { + return nil, fmt.Errorf("cannot write to WAL directory: %v", err) + } + + if cfg.ShouldDiscover() { + plog.Warningf("discovery token ignored since a cluster has already been initialized. 
Valid log found at %q", cfg.WALDir()) + } + snapshot, err = ss.Load() + if err != nil && err != snap.ErrNoSnapshot { + return nil, err + } + if snapshot != nil { + if err = st.Recovery(snapshot.Data); err != nil { + plog.Panicf("recovered store from snapshot error: %v", err) + } + plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { + plog.Panicf("recovering backend from snapshot error: %v", err) + } + } + cfg.Print() + if !cfg.ForceNewCluster { + id, cl, n, s, w = restartNode(cfg, snapshot) + } else { + id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) + } + cl.SetStore(st) + cl.SetBackend(be) + cl.Recover(api.UpdateCapability) + if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist { + os.RemoveAll(bepath) + return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) + } + default: + return nil, fmt.Errorf("unsupported bootstrap config") + } + + if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { + return nil, fmt.Errorf("cannot access member directory: %v", terr) + } + + sstats := stats.NewServerStats(cfg.Name, id.String()) + lstats := stats.NewLeaderStats(id.String()) + + heartbeat := time.Duration(cfg.TickMs) * time.Millisecond + srv = &EtcdServer{ + readych: make(chan struct{}), + Cfg: cfg, + snapCount: cfg.SnapCount, + errorc: make(chan error, 1), + store: st, + snapshotter: ss, + r: *newRaftNode( + raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: heartbeat, + raftStorage: s, + storage: NewStorage(w, ss), + }, + ), + id: id, + attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, + cluster: cl, + stats: sstats, + lstats: lstats, + SyncTicker: time.NewTicker(500 * time.Millisecond), + peerRt: prt, + reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), + forceVersionC: make(chan struct{}), + } + + srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} + + srv.be = be + minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat + + // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. + // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. + srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds()))) + srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex) + if beExist { + kvindex := srv.kv.ConsistentIndex() + // TODO: remove kvindex != 0 checking when we do not expect users to upgrade + // etcd from pre-3.0 release. 
+ if snapshot != nil && kvindex < snapshot.Metadata.Index { + if kvindex != 0 { + return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index) + } + plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) + } + } + newSrv := srv // since srv == nil in defer if srv is returned as nil + defer func() { + // closing backend without first closing kv can cause + // resumed compactions to fail with closed tx errors + if err != nil { + newSrv.kv.Close() + } + }() + + srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + tp, err := auth.NewTokenProvider(cfg.AuthToken, + func(index uint64) <-chan struct{} { + return srv.applyWait.Wait(index) + }, + ) + if err != nil { + plog.Errorf("failed to create token provider: %s", err) + return nil, err + } + srv.authStore = auth.NewAuthStore(srv.be, tp) + if h := cfg.AutoCompactionRetention; h != 0 { + srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) + srv.compactor.Run() + } + + srv.applyV3Base = &applierV3backend{srv} + if err = srv.restoreAlarms(); err != nil { + return nil, err + } + + // TODO: move transport initialization near the definition of remote + tr := &rafthttp.Transport{ + TLSInfo: cfg.PeerTLSInfo, + DialTimeout: cfg.peerDialTimeout(), + ID: id, + URLs: cfg.PeerURLs, + ClusterID: cl.ID(), + Raft: srv, + Snapshotter: ss, + ServerStats: sstats, + LeaderStats: lstats, + ErrorC: srv.errorc, + } + if err = tr.Start(); err != nil { + return nil, err + } + // add all remotes into transport + for _, m := range remotes { + if m.ID != id { + tr.AddRemote(m.ID, m.PeerURLs) + } + } + for _, m := range cl.Members() { + if m.ID != id { + tr.AddPeer(m.ID, m.PeerURLs) + } + } + srv.r.transport = tr + + return srv, nil +} + +// Start prepares and starts server in a new goroutine. It is no longer safe to +// modify a server's fields after it has been sent to Start. +// It also starts a goroutine to publish its server information. +func (s *EtcdServer) Start() { + s.start() + s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) + s.goAttach(s.purgeFile) + s.goAttach(func() { monitorFileDescriptor(s.stopping) }) + s.goAttach(s.monitorVersions) + s.goAttach(s.linearizableReadLoop) +} + +// start prepares and starts server in a new goroutine. It is no longer safe to +// modify a server's fields after it has been sent to Start. +// This function is just used for testing. +func (s *EtcdServer) start() { + if s.snapCount == 0 { + plog.Infof("set snapshot count to default %d", DefaultSnapCount) + s.snapCount = DefaultSnapCount + } + s.w = wait.New() + s.applyWait = wait.NewTimeList() + s.done = make(chan struct{}) + s.stop = make(chan struct{}) + s.stopping = make(chan struct{}) + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.readwaitc = make(chan struct{}, 1) + s.readNotifier = newNotifier() + if s.ClusterVersion() != nil { + plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) + } else { + plog.Infof("starting server... 
[version: %v, cluster version: to_be_decided]", version.Version) + } + // TODO: if this is an empty log, writes all peer infos + // into the first entry + go s.run() +} + +func (s *EtcdServer) purgeFile() { + var serrc, werrc <-chan error + if s.Cfg.MaxSnapFiles > 0 { + serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) + } + if s.Cfg.MaxWALFiles > 0 { + werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) + } + select { + case e := <-werrc: + plog.Fatalf("failed to purge wal file %v", e) + case e := <-serrc: + plog.Fatalf("failed to purge snap file %v", e) + case <-s.stopping: + return + } +} + +func (s *EtcdServer) ID() types.ID { return s.id } + +func (s *EtcdServer) Cluster() *membership.RaftCluster { return s.cluster } + +func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() } + +func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor } + +func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } + +func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { + if s.cluster.IsIDRemoved(types.ID(m.From)) { + plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) + return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") + } + if m.Type == raftpb.MsgApp { + s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) + } + return s.r.Step(ctx, m) +} + +func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) } + +func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) } + +// ReportSnapshot reports snapshot sent status to the raft state machine, +// and clears the used snapshot from the snapshot store. +func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) { + s.r.ReportSnapshot(id, status) +} + +type etcdProgress struct { + confState raftpb.ConfState + snapi uint64 + appliedt uint64 + appliedi uint64 +} + +// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, +// and helps decouple state machine logic from Raft algorithms. 
+// TODO: add a state machine interface to apply the commit entries and do snapshot/recover +type raftReadyHandler struct { + updateLeadership func(newLeader bool) + updateCommittedIndex func(uint64) +} + +func (s *EtcdServer) run() { + sn, err := s.r.raftStorage.Snapshot() + if err != nil { + plog.Panicf("get snapshot from raft storage error: %v", err) + } + + // asynchronously accept apply packets, dispatch progress in-order + sched := schedule.NewFIFOScheduler() + + var ( + smu sync.RWMutex + syncC <-chan time.Time + ) + setSyncC := func(ch <-chan time.Time) { + smu.Lock() + syncC = ch + smu.Unlock() + } + getSyncC := func() (ch <-chan time.Time) { + smu.RLock() + ch = syncC + smu.RUnlock() + return + } + rh := &raftReadyHandler{ + updateLeadership: func(newLeader bool) { + if !s.isLeader() { + if s.lessor != nil { + s.lessor.Demote() + } + if s.compactor != nil { + s.compactor.Pause() + } + setSyncC(nil) + } else { + if newLeader { + t := time.Now() + s.leadTimeMu.Lock() + s.leadElectedTime = t + s.leadTimeMu.Unlock() + } + setSyncC(s.SyncTicker.C) + if s.compactor != nil { + s.compactor.Resume() + } + } + + // TODO: remove the nil checking + // current test utility does not provide the stats + if s.stats != nil { + s.stats.BecomeLeader() + } + }, + updateCommittedIndex: func(ci uint64) { + cci := s.getCommittedIndex() + if ci > cci { + s.setCommittedIndex(ci) + } + }, + } + s.r.start(rh) + + ep := etcdProgress{ + confState: sn.Metadata.ConfState, + snapi: sn.Metadata.Index, + appliedt: sn.Metadata.Term, + appliedi: sn.Metadata.Index, + } + + defer func() { + s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping + close(s.stopping) + s.wgMu.Unlock() + s.cancel() + + sched.Stop() + + // wait for gouroutines before closing raft so wal stays open + s.wg.Wait() + + s.SyncTicker.Stop() + + // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines + // by adding a peer after raft stops the transport + s.r.stop() + + // kv, lessor and backend can be nil if running without v3 enabled + // or running unit tests. 
+ if s.lessor != nil { + s.lessor.Stop() + } + if s.kv != nil { + s.kv.Close() + } + if s.authStore != nil { + s.authStore.Close() + } + if s.be != nil { + s.be.Close() + } + if s.compactor != nil { + s.compactor.Stop() + } + close(s.done) + }() + + var expiredLeaseC <-chan []*lease.Lease + if s.lessor != nil { + expiredLeaseC = s.lessor.ExpiredLeasesC() + } + + for { + select { + case ap := <-s.r.apply(): + f := func(context.Context) { s.applyAll(&ep, &ap) } + sched.Schedule(f) + case leases := <-expiredLeaseC: + s.goAttach(func() { + // Increases throughput of expired leases deletion process through parallelization + c := make(chan struct{}, maxPendingRevokes) + for _, lease := range leases { + select { + case c <- struct{}{}: + case <-s.stopping: + return + } + lid := lease.ID + s.goAttach(func() { + s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + leaseExpired.Inc() + <-c + }) + } + }) + case err := <-s.errorc: + plog.Errorf("%s", err) + plog.Infof("the data-dir used by this member must be removed.") + return + case <-getSyncC(): + if s.store.HasTTLKeys() { + s.sync(s.Cfg.ReqTimeout()) + } + case <-s.stop: + return + } + } +} + +func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { + s.applySnapshot(ep, apply) + st := time.Now() + s.applyEntries(ep, apply) + d := time.Since(st) + entriesNum := len(apply.entries) + if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration { + plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries)) + plog.Warningf("avoid queries with large range/delete range!") + } + proposalsApplied.Set(float64(ep.appliedi)) + s.applyWait.Trigger(ep.appliedi) + // wait for the raft routine to finish the disk writes before triggering a + // snapshot. or applied index might be greater than the last index in raft + // storage, since the raft routine might be slower than apply routine. + <-apply.notifyc + + s.triggerSnapshot(ep) + select { + // snapshot requested via send() + case m := <-s.r.msgSnapC: + merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState) + s.sendMergedSnap(merged) + default: + } +} + +func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { + if raft.IsEmptySnap(apply.snapshot) { + return + } + + plog.Infof("applying snapshot at index %d...", ep.snapi) + defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) + + if apply.snapshot.Metadata.Index <= ep.appliedi { + plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", + apply.snapshot.Metadata.Index, ep.appliedi) + } + + // wait for raftNode to persist snapshot onto the disk + <-apply.notifyc + + newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) + if err != nil { + plog.Panic(err) + } + + // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. + // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. + if s.lessor != nil { + plog.Info("recovering lessor...") + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) + plog.Info("finished recovering lessor") + } + + plog.Info("restoring mvcc store...") + + if err := s.kv.Restore(newbe); err != nil { + plog.Panicf("restore KV error: %v", err) + } + s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) + + plog.Info("finished restoring mvcc store") + + // Closing old backend might block until all the txns + // on the backend are finished. + // We do not want to wait on closing the old backend. 
+ s.bemu.Lock() + oldbe := s.be + go func() { + plog.Info("closing old backend...") + defer plog.Info("finished closing old backend") + + if err := oldbe.Close(); err != nil { + plog.Panicf("close backend error: %v", err) + } + }() + + s.be = newbe + s.bemu.Unlock() + + plog.Info("recovering alarms...") + if err := s.restoreAlarms(); err != nil { + plog.Panicf("restore alarms error: %v", err) + } + plog.Info("finished recovering alarms") + + if s.authStore != nil { + plog.Info("recovering auth store...") + s.authStore.Recover(newbe) + plog.Info("finished recovering auth store") + } + + plog.Info("recovering store v2...") + if err := s.store.Recovery(apply.snapshot.Data); err != nil { + plog.Panicf("recovery store error: %v", err) + } + plog.Info("finished recovering store v2") + + s.cluster.SetBackend(s.be) + plog.Info("recovering cluster configuration...") + s.cluster.Recover(api.UpdateCapability) + plog.Info("finished recovering cluster configuration") + + plog.Info("removing old peers from network...") + // recover raft transport + s.r.transport.RemoveAllPeers() + plog.Info("finished removing old peers from network") + + plog.Info("adding peers from new cluster configuration into network...") + for _, m := range s.cluster.Members() { + if m.ID == s.ID() { + continue + } + s.r.transport.AddPeer(m.ID, m.PeerURLs) + } + plog.Info("finished adding peers from new cluster configuration into network...") + + ep.appliedt = apply.snapshot.Metadata.Term + ep.appliedi = apply.snapshot.Metadata.Index + ep.snapi = ep.appliedi + ep.confState = apply.snapshot.Metadata.ConfState +} + +func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { + if len(apply.entries) == 0 { + return + } + firsti := apply.entries[0].Index + if firsti > ep.appliedi+1 { + plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) + } + var ents []raftpb.Entry + if ep.appliedi+1-firsti < uint64(len(apply.entries)) { + ents = apply.entries[ep.appliedi+1-firsti:] + } + if len(ents) == 0 { + return + } + var shouldstop bool + if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { + go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster")) + } +} + +func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { + if ep.appliedi-ep.snapi <= s.snapCount { + return + } + + plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) + s.snapshot(ep.appliedi, ep.confState) + ep.snapi = ep.appliedi +} + +func (s *EtcdServer) isMultiNode() bool { + return s.cluster != nil && len(s.cluster.MemberIDs()) > 1 +} + +func (s *EtcdServer) isLeader() bool { + return uint64(s.ID()) == s.Lead() +} + +// transferLeadership transfers the leader to the given transferee. +// TODO: maybe expose to client? 
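// Illustrative, stand-alone sketch (not part of the vendored etcd file) of the
// index arithmetic in applyEntries above: when a committed batch partially
// overlaps entries that were already applied, only the entries with index
// greater than appliedi are handed on. The entry type is a simplified stand-in
// for raftpb.Entry.
package main

import "fmt"

type entry struct{ Index uint64 }

func toApply(appliedi uint64, ents []entry) []entry {
	if len(ents) == 0 {
		return nil
	}
	firsti := ents[0].Index
	if firsti > appliedi+1 {
		panic("gap between applied index and first committed entry")
	}
	if appliedi+1-firsti < uint64(len(ents)) {
		return ents[appliedi+1-firsti:]
	}
	return nil
}

func main() {
	ents := []entry{{Index: 8}, {Index: 9}, {Index: 10}}
	fmt.Println(toApply(9, ents)) // only [{10}] still needs applying
}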
+func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error { + now := time.Now() + interval := time.Duration(s.Cfg.TickMs) * time.Millisecond + + plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) + s.r.TransferLeadership(ctx, lead, transferee) + for s.Lead() != transferee { + select { + case <-ctx.Done(): // time out + return ErrTimeoutLeaderTransfer + case <-time.After(interval): + } + } + + // TODO: drain all requests, or drop all messages to the old leader + + plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) + return nil +} + +// TransferLeadership transfers the leader to the chosen transferee. +func (s *EtcdServer) TransferLeadership() error { + if !s.isLeader() { + plog.Printf("skipped leadership transfer for stopping non-leader member") + return nil + } + + if !s.isMultiNode() { + plog.Printf("skipped leadership transfer for single member cluster") + return nil + } + + transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs()) + if !ok { + return ErrUnhealthy + } + + tm := s.Cfg.ReqTimeout() + ctx, cancel := context.WithTimeout(s.ctx, tm) + err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) + cancel() + return err +} + +// HardStop stops the server without coordination with other members in the cluster. +func (s *EtcdServer) HardStop() { + select { + case s.stop <- struct{}{}: + case <-s.done: + return + } + <-s.done +} + +// Stop stops the server gracefully, and shuts down the running goroutine. +// Stop should be called after a Start(s), otherwise it will block forever. +// When stopping leader, Stop transfers its leadership to one of its peers +// before stopping the server. +func (s *EtcdServer) Stop() { + if err := s.TransferLeadership(); err != nil { + plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) + } + s.HardStop() +} + +// ReadyNotify returns a channel that will be closed when the server +// is ready to serve client requests +func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych } + +func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { + select { + case <-time.After(d): + case <-s.done: + } + select { + case s.errorc <- err: + default: + } +} + +// StopNotify returns a channel that receives a empty struct +// when the server is stopped. +func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } + +func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } + +func (s *EtcdServer) LeaderStats() []byte { + lead := atomic.LoadUint64(&s.r.lead) + if lead != uint64(s.id) { + return nil + } + return s.lstats.JSON() +} + +func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } + +func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { + if s.authStore == nil { + // In the context of ordinary etcd process, s.authStore will never be nil. + // This branch is for handling cases in server_test.go + return nil + } + + // Note that this permission check is done in the API layer, + // so TOCTOU problem can be caused potentially in a schedule like this: + // update membership with user A -> revoke root role of A -> apply membership change + // in the state machine layer + // However, both of membership change and role management requires the root privilege. + // So careful operation by admins can prevent the problem. 
+ authInfo, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + + return s.AuthStore().IsAdminPermitted(authInfo) +} + +func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + if s.Cfg.StrictReconfigCheck { + // by default StrictReconfigCheck is enabled; reject new members if unhealthy + if !s.cluster.IsReadyToAddNewMember() { + plog.Warningf("not enough started members, rejecting member add %+v", memb) + return nil, ErrNotEnoughStartedMembers + } + if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { + plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) + return nil, ErrUnhealthy + } + } + + // TODO: move Member to protobuf type + b, err := json.Marshal(memb) + if err != nil { + return nil, err + } + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeAddNode, + NodeID: uint64(memb.ID), + Context: b, + } + return s.configure(ctx, cc) +} + +func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss + if err := s.mayRemoveMember(types.ID(id)); err != nil { + return nil, err + } + + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + return s.configure(ctx, cc) +} + +func (s *EtcdServer) mayRemoveMember(id types.ID) error { + if !s.Cfg.StrictReconfigCheck { + return nil + } + + if !s.cluster.IsReadyToRemoveMember(uint64(id)) { + plog.Warningf("not enough started members, rejecting remove member %s", id) + return ErrNotEnoughStartedMembers + } + + // downed member is safe to remove since it's not part of the active quorum + if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { + return nil + } + + // protect quorum if some members are down + m := s.cluster.Members() + active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) + if (active - 1) < 1+((len(m)-1)/2) { + plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) + return ErrUnhealthy + } + + return nil +} + +func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + b, merr := json.Marshal(memb) + if merr != nil { + return nil, merr + } + + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeUpdateNode, + NodeID: uint64(memb.ID), + Context: b, + } + return s.configure(ctx, cc) +} + +// Implement the RaftTimer interface + +func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) } + +func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) } + +// Lead is only for testing purposes. +// TODO: add Raft server interface to expose raft related info: +// Index, Term, Lead, Committed, Applied, LastIndex, etc. +func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } + +func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } + +type confChangeResponse struct { + membs []*membership.Member + err error +} + +// configure sends a configuration change through consensus and +// then waits for it to be applied to the server. It +// will block until the change is performed or there is an error. 
+func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { + cc.ID = s.reqIDGen.Next() + ch := s.w.Register(cc.ID) + start := time.Now() + if err := s.r.ProposeConfChange(ctx, cc); err != nil { + s.w.Trigger(cc.ID, nil) + return nil, err + } + select { + case x := <-ch: + if x == nil { + plog.Panicf("configure trigger value should never be nil") + } + resp := x.(*confChangeResponse) + return resp.membs, resp.err + case <-ctx.Done(): + s.w.Trigger(cc.ID, nil) // GC wait + return nil, s.parseProposeCtxErr(ctx.Err(), start) + case <-s.stopping: + return nil, ErrStopped + } +} + +// sync proposes a SYNC request and is non-blocking. +// This makes no guarantee that the request will be proposed or performed. +// The request will be canceled after the given timeout. +func (s *EtcdServer) sync(timeout time.Duration) { + req := pb.Request{ + Method: "SYNC", + ID: s.reqIDGen.Next(), + Time: time.Now().UnixNano(), + } + data := pbutil.MustMarshal(&req) + // There is no promise that node has leader when do SYNC request, + // so it uses goroutine to propose. + ctx, cancel := context.WithTimeout(s.ctx, timeout) + s.goAttach(func() { + s.r.Propose(ctx, data) + cancel() + }) +} + +// publish registers server information into the cluster. The information +// is the JSON representation of this server's member struct, updated with the +// static clientURLs of the server. +// The function keeps attempting to register until it succeeds, +// or its server is stopped. +func (s *EtcdServer) publish(timeout time.Duration) { + b, err := json.Marshal(s.attributes) + if err != nil { + plog.Panicf("json marshal error: %v", err) + return + } + req := pb.Request{ + Method: "PUT", + Path: membership.MemberAttributesStorePath(s.id), + Val: string(b), + } + + for { + ctx, cancel := context.WithTimeout(s.ctx, timeout) + _, err := s.Do(ctx, req) + cancel() + switch err { + case nil: + close(s.readych) + plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) + return + case ErrStopped: + plog.Infof("aborting publish because server is stopped") + return + default: + plog.Errorf("publish error: %v", err) + } + } +} + +func (s *EtcdServer) sendMergedSnap(merged snap.Message) { + atomic.AddInt64(&s.inflightSnapshots, 1) + + s.r.transport.SendSnapshot(merged) + s.goAttach(func() { + select { + case ok := <-merged.CloseNotify(): + // delay releasing inflight snapshot for another 30 seconds to + // block log compaction. + // If the follower still fails to catch up, it is probably just too slow + // to catch up. We cannot avoid the snapshot cycle anyway. + if ok { + select { + case <-time.After(releaseDelayAfterSnapshot): + case <-s.stopping: + } + } + atomic.AddInt64(&s.inflightSnapshots, -1) + case <-s.stopping: + return + } + }) +} + +// apply takes entries received from Raft (after it has been committed) and +// applies them to the current state of the EtcdServer. +// The given entries should not be empty. 
+func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { + for i := range es { + e := es[i] + switch e.Type { + case raftpb.EntryNormal: + s.applyEntryNormal(&e) + case raftpb.EntryConfChange: + // set the consistent index of current executing entry + if e.Index > s.consistIndex.ConsistentIndex() { + s.consistIndex.setConsistentIndex(e.Index) + } + var cc raftpb.ConfChange + pbutil.MustUnmarshal(&cc, e.Data) + removedSelf, err := s.applyConfChange(cc, confState) + s.setAppliedIndex(e.Index) + shouldStop = shouldStop || removedSelf + s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) + default: + plog.Panicf("entry type should be either EntryNormal or EntryConfChange") + } + atomic.StoreUint64(&s.r.index, e.Index) + atomic.StoreUint64(&s.r.term, e.Term) + appliedt = e.Term + appliedi = e.Index + } + return appliedt, appliedi, shouldStop +} + +// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer +func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { + shouldApplyV3 := false + if e.Index > s.consistIndex.ConsistentIndex() { + // set the consistent index of current executing entry + s.consistIndex.setConsistentIndex(e.Index) + shouldApplyV3 = true + } + defer s.setAppliedIndex(e.Index) + + // raft state machine may generate noop entry when leader confirmation. + // skip it in advance to avoid some potential bug in the future + if len(e.Data) == 0 { + select { + case s.forceVersionC <- struct{}{}: + default: + } + // promote lessor when the local member is leader and finished + // applying all entries from the last term. + if s.isLeader() { + s.lessor.Promote(s.Cfg.electionTimeout()) + } + return + } + + var raftReq pb.InternalRaftRequest + if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible + var r pb.Request + pbutil.MustUnmarshal(&r, e.Data) + s.w.Trigger(r.ID, s.applyV2Request(&r)) + return + } + if raftReq.V2 != nil { + req := raftReq.V2 + s.w.Trigger(req.ID, s.applyV2Request(req)) + return + } + + // do not re-apply applied entries. + if !shouldApplyV3 { + return + } + + id := raftReq.ID + if id == 0 { + id = raftReq.Header.ID + } + + var ar *applyResult + needResult := s.w.IsRegistered(id) + if needResult || !noSideEffect(&raftReq) { + if !needResult && raftReq.Txn != nil { + removeNeedlessRangeReqs(raftReq.Txn) + } + ar = s.applyV3.Apply(&raftReq) + } + + if ar == nil { + return + } + + if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { + s.w.Trigger(id, ar) + return + } + + plog.Errorf("applying raft message exceeded backend quota") + s.goAttach(func() { + a := &pb.AlarmRequest{ + MemberID: uint64(s.ID()), + Action: pb.AlarmRequest_ACTIVATE, + Alarm: pb.AlarmType_NOSPACE, + } + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) + s.w.Trigger(id, ar) + }) +} + +// applyConfChange applies a ConfChange to the server. 
It is only +// invoked with a ConfChange that has already passed through Raft +func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) { + if err := s.cluster.ValidateConfigurationChange(cc); err != nil { + cc.NodeID = raft.None + s.r.ApplyConfChange(cc) + return false, err + } + *confState = *s.r.ApplyConfChange(cc) + switch cc.Type { + case raftpb.ConfChangeAddNode: + m := new(membership.Member) + if err := json.Unmarshal(cc.Context, m); err != nil { + plog.Panicf("unmarshal member should never fail: %v", err) + } + if cc.NodeID != uint64(m.ID) { + plog.Panicf("nodeID should always be equal to member ID") + } + s.cluster.AddMember(m) + if m.ID != s.id { + s.r.transport.AddPeer(m.ID, m.PeerURLs) + } + case raftpb.ConfChangeRemoveNode: + id := types.ID(cc.NodeID) + s.cluster.RemoveMember(id) + if id == s.id { + return true, nil + } + s.r.transport.RemovePeer(id) + case raftpb.ConfChangeUpdateNode: + m := new(membership.Member) + if err := json.Unmarshal(cc.Context, m); err != nil { + plog.Panicf("unmarshal member should never fail: %v", err) + } + if cc.NodeID != uint64(m.ID) { + plog.Panicf("nodeID should always be equal to member ID") + } + s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes) + if m.ID != s.id { + s.r.transport.UpdatePeer(m.ID, m.PeerURLs) + } + } + return false, nil +} + +// TODO: non-blocking snapshot +func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { + clone := s.store.Clone() + // commit kv to write metadata (for example: consistent index) to disk. + // KV().commit() updates the consistent index in backend. + // All operations that update consistent index must be called sequentially + // from applyAll function. + // So KV().Commit() cannot run in parallel with apply. It has to be called outside + // the go routine created below. + s.KV().Commit() + + s.goAttach(func() { + d, err := clone.SaveNoCopy() + // TODO: current store will never fail to do a snapshot + // what should we do if the store might fail? + if err != nil { + plog.Panicf("store save should never fail: %v", err) + } + snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) + if err != nil { + // the snapshot was done asynchronously with the progress of raft. + // raft might have already got a newer snapshot. + if err == raft.ErrSnapOutOfDate { + return + } + plog.Panicf("unexpected create snapshot error %v", err) + } + // SaveSnap saves the snapshot and releases the locked wal files + // to the snapshot index. + if err = s.r.storage.SaveSnap(snap); err != nil { + plog.Fatalf("save snapshot error: %v", err) + } + plog.Infof("saved snapshot at index %d", snap.Metadata.Index) + + // When sending a snapshot, etcd will pause compaction. + // After receives a snapshot, the slow follower needs to get all the entries right after + // the snapshot sent to catch up. If we do not pause compaction, the log entries right after + // the snapshot sent might already be compacted. It happens when the snapshot takes long time + // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. + if atomic.LoadInt64(&s.inflightSnapshots) != 0 { + plog.Infof("skip compaction since there is an inflight snapshot") + return + } + + // keep some in memory log entries for slow followers. + compacti := uint64(1) + if snapi > numberOfCatchUpEntries { + compacti = snapi - numberOfCatchUpEntries + } + err = s.r.raftStorage.Compact(compacti) + if err != nil { + // the compaction was done asynchronously with the progress of raft. 
+ // raft log might already been compact. + if err == raft.ErrCompacted { + return + } + plog.Panicf("unexpected compaction error %v", err) + } + plog.Infof("compacted raft log at %d", compacti) + }) +} + +// CutPeer drops messages to the specified peer. +func (s *EtcdServer) CutPeer(id types.ID) { + tr, ok := s.r.transport.(*rafthttp.Transport) + if ok { + tr.CutPeer(id) + } +} + +// MendPeer recovers the message dropping behavior of the given peer. +func (s *EtcdServer) MendPeer(id types.ID) { + tr, ok := s.r.transport.(*rafthttp.Transport) + if ok { + tr.MendPeer(id) + } +} + +func (s *EtcdServer) PauseSending() { s.r.pauseSending() } + +func (s *EtcdServer) ResumeSending() { s.r.resumeSending() } + +func (s *EtcdServer) ClusterVersion() *semver.Version { + if s.cluster == nil { + return nil + } + return s.cluster.Version() +} + +// monitorVersions checks the member's version every monitorVersionInterval. +// It updates the cluster version if all members agrees on a higher one. +// It prints out log if there is a member with a higher version than the +// local version. +func (s *EtcdServer) monitorVersions() { + for { + select { + case <-s.forceVersionC: + case <-time.After(monitorVersionInterval): + case <-s.stopping: + return + } + + if s.Leader() != s.ID() { + continue + } + + v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt)) + if v != nil { + // only keep major.minor version for comparison + v = &semver.Version{ + Major: v.Major, + Minor: v.Minor, + } + } + + // if the current version is nil: + // 1. use the decided version if possible + // 2. or use the min cluster version + if s.cluster.Version() == nil { + verStr := version.MinClusterVersion + if v != nil { + verStr = v.String() + } + s.goAttach(func() { s.updateClusterVersion(verStr) }) + continue + } + + // update cluster version only if the decided version is greater than + // the current cluster version + if v != nil && s.cluster.Version().LessThan(*v) { + s.goAttach(func() { s.updateClusterVersion(v.String()) }) + } + } +} + +func (s *EtcdServer) updateClusterVersion(ver string) { + if s.cluster.Version() == nil { + plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) + } else { + plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) + } + req := pb.Request{ + Method: "PUT", + Path: membership.StoreClusterVersionKey(), + Val: ver, + } + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) + _, err := s.Do(ctx, req) + cancel() + switch err { + case nil: + return + case ErrStopped: + plog.Infof("aborting update cluster version because server is stopped") + return + default: + plog.Errorf("error updating cluster version (%v)", err) + } +} + +func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { + switch err { + case context.Canceled: + return ErrCanceled + case context.DeadlineExceeded: + s.leadTimeMu.RLock() + curLeadElected := s.leadElectedTime + s.leadTimeMu.RUnlock() + prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) + if start.After(prevLeadLost) && start.Before(curLeadElected) { + return ErrTimeoutDueToLeaderFail + } + + lead := types.ID(atomic.LoadUint64(&s.r.lead)) + switch lead { + case types.ID(raft.None): + // TODO: return error to specify it happens because the cluster does not have leader now + case s.ID(): + if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { + 
return ErrTimeoutDueToConnectionLost + } + default: + if !isConnectedSince(s.r.transport, start, lead) { + return ErrTimeoutDueToConnectionLost + } + } + + return ErrTimeout + default: + return err + } +} + +func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv } +func (s *EtcdServer) Backend() backend.Backend { + s.bemu.Lock() + defer s.bemu.Unlock() + return s.be +} + +func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } + +func (s *EtcdServer) restoreAlarms() error { + s.applyV3 = s.newApplierV3() + as, err := alarm.NewAlarmStore(s) + if err != nil { + return err + } + s.alarmStore = as + if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { + s.applyV3 = newApplierV3Capped(s.applyV3) + } + return nil +} + +func (s *EtcdServer) getAppliedIndex() uint64 { + return atomic.LoadUint64(&s.appliedIndex) +} + +func (s *EtcdServer) setAppliedIndex(v uint64) { + atomic.StoreUint64(&s.appliedIndex, v) +} + +func (s *EtcdServer) getCommittedIndex() uint64 { + return atomic.LoadUint64(&s.committedIndex) +} + +func (s *EtcdServer) setCommittedIndex(v uint64) { + atomic.StoreUint64(&s.committedIndex, v) +} + +// goAttach creates a goroutine on a given function and tracks it using +// the etcdserver waitgroup. +func (s *EtcdServer) goAttach(f func()) { + s.wgMu.RLock() // this blocks with ongoing close(s.stopping) + defer s.wgMu.RUnlock() + select { + case <-s.stopping: + plog.Warning("server has stopped (skipping goAttach)") + return + default: + } + + // now safe to add since waitgroup wait has not started yet + s.wg.Add(1) + go func() { + defer s.wg.Done() + f() + }() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go new file mode 100644 index 000000000..928aa95b6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "io" + + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), +// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message +// as ReadCloser. +func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message { + // get a snapshot of v2 store as []byte + clone := s.store.Clone() + d, err := clone.SaveNoCopy() + if err != nil { + plog.Panicf("store save should never fail: %v", err) + } + + // commit kv to write metadata(for example: consistent index). + s.KV().Commit() + dbsnap := s.be.Snapshot() + // get a snapshot of v3 KV as readCloser + rc := newSnapshotReaderCloser(dbsnap) + + // put the []byte snapshot of store into raft snapshot and return the merged snapshot with + // KV readCloser snapshot. 
+ snapshot := raftpb.Snapshot{ + Metadata: raftpb.SnapshotMetadata{ + Index: snapi, + Term: snapt, + ConfState: confState, + }, + Data: d, + } + m.Snapshot = snapshot + + return *snap.NewMessage(m, rc, dbsnap.Size()) +} + +func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + n, err := snapshot.WriteTo(pw) + if err == nil { + plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) + } + pw.CloseWithError(err) + err = snapshot.Close() + if err != nil { + plog.Panicf("failed to close database snapshot: %v", err) + } + }() + return pr +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go new file mode 100644 index 000000000..aa8f87569 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go @@ -0,0 +1,98 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "io" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "github.com/coreos/etcd/wal" + "github.com/coreos/etcd/wal/walpb" +) + +type Storage interface { + // Save function saves ents and state to the underlying stable storage. + // Save MUST block until st and ents are on stable storage. + Save(st raftpb.HardState, ents []raftpb.Entry) error + // SaveSnap function saves snapshot to the underlying stable storage. + SaveSnap(snap raftpb.Snapshot) error + // Close closes the Storage and performs finalization. + Close() error +} + +type storage struct { + *wal.WAL + *snap.Snapshotter +} + +func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage { + return &storage{w, s} +} + +// SaveSnap saves the snapshot to disk and release the locked +// wal files since they will not be used. +func (st *storage) SaveSnap(snap raftpb.Snapshot) error { + walsnap := walpb.Snapshot{ + Index: snap.Metadata.Index, + Term: snap.Metadata.Term, + } + err := st.WAL.SaveSnapshot(walsnap) + if err != nil { + return err + } + err = st.Snapshotter.SaveSnap(snap) + if err != nil { + return err + } + return st.WAL.ReleaseLockTo(snap.Metadata.Index) +} + +func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { + var ( + err error + wmetadata []byte + ) + + repaired := false + for { + if w, err = wal.Open(waldir, snap); err != nil { + plog.Fatalf("open wal error: %v", err) + } + if wmetadata, st, ents, err = w.ReadAll(); err != nil { + w.Close() + // we can only repair ErrUnexpectedEOF and we never repair twice. 
+ if repaired || err != io.ErrUnexpectedEOF { + plog.Fatalf("read wal error (%v) and cannot be repaired", err) + } + if !wal.Repair(waldir) { + plog.Fatalf("WAL error (%v) cannot be repaired", err) + } else { + plog.Infof("repaired WAL error (%v)", err) + repaired = true + } + continue + } + break + } + var metadata pb.Metadata + pbutil.MustUnmarshal(&metadata, wmetadata) + id = types.ID(metadata.NodeID) + cid = types.ID(metadata.ClusterID) + return +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go new file mode 100644 index 000000000..e3896ffc2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/util.go @@ -0,0 +1,97 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "time" + + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/rafthttp" +) + +// isConnectedToQuorumSince checks whether the local member is connected to the +// quorum of the cluster since the given time. +func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { + return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 +} + +// isConnectedSince checks whether the local member is connected to the +// remote member since the given time. +func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool { + t := transport.ActiveSince(remote) + return !t.IsZero() && t.Before(since) +} + +// isConnectedFullySince checks whether the local member is connected to all +// members in the cluster since the given time. +func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { + return numConnectedSince(transport, since, self, members) == len(members) +} + +// numConnectedSince counts how many members are connected to the local member +// since the given time. +func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int { + connectedNum := 0 + for _, m := range members { + if m.ID == self || isConnectedSince(transport, since, m.ID) { + connectedNum++ + } + } + return connectedNum +} + +// longestConnected chooses the member with longest active-since-time. +// It returns false, if nothing is active. 
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) { + var longest types.ID + var oldest time.Time + for _, id := range membs { + tm := tp.ActiveSince(id) + if tm.IsZero() { // inactive + continue + } + + if oldest.IsZero() { // first longest candidate + oldest = tm + longest = id + } + + if tm.Before(oldest) { + oldest = tm + longest = id + } + } + if uint64(longest) == 0 { + return longest, false + } + return longest, true +} + +type notifier struct { + c chan struct{} + err error +} + +func newNotifier() *notifier { + return ¬ifier{ + c: make(chan struct{}), + } +} + +func (nc *notifier) notify(err error) { + nc.err = err + close(nc.c) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go new file mode 100644 index 000000000..72c4eb7c5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go @@ -0,0 +1,125 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" +) + +type v2API interface { + Post(ctx context.Context, r *pb.Request) (Response, error) + Put(ctx context.Context, r *pb.Request) (Response, error) + Delete(ctx context.Context, r *pb.Request) (Response, error) + QGet(ctx context.Context, r *pb.Request) (Response, error) + Get(ctx context.Context, r *pb.Request) (Response, error) + Head(ctx context.Context, r *pb.Request) (Response, error) +} + +type v2apiStore struct{ s *EtcdServer } + +func (a *v2apiStore) Post(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) Put(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) Delete(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) QGet(ctx context.Context, r *pb.Request) (Response, error) { + return a.processRaftRequest(ctx, r) +} + +func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Response, error) { + data, err := r.Marshal() + if err != nil { + return Response{}, err + } + ch := a.s.w.Register(r.ID) + + start := time.Now() + a.s.r.Propose(ctx, data) + proposalsPending.Inc() + defer proposalsPending.Dec() + + select { + case x := <-ch: + resp := x.(Response) + return resp, resp.err + case <-ctx.Done(): + proposalsFailed.Inc() + a.s.w.Trigger(r.ID, nil) // GC wait + return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) + case <-a.s.stopping: + } + return Response{}, ErrStopped +} + +func (a *v2apiStore) Get(ctx context.Context, r *pb.Request) (Response, error) { + if r.Wait { + wc, err := a.s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since) + if err != nil { + return Response{}, err + } + return Response{Watcher: wc}, nil + } + ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) + if err != nil { + return 
Response{}, err + } + return Response{Event: ev}, nil +} + +func (a *v2apiStore) Head(ctx context.Context, r *pb.Request) (Response, error) { + ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) + if err != nil { + return Response{}, err + } + return Response{Event: ev}, nil +} + +// Do interprets r and performs an operation on s.store according to r.Method +// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with +// Quorum == true, r will be sent through consensus before performing its +// respective operation. Do will block until an action is performed or there is +// an error. +func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { + r.ID = s.reqIDGen.Next() + if r.Method == "GET" && r.Quorum { + r.Method = "QGET" + } + v2api := (v2API)(&v2apiStore{s}) + switch r.Method { + case "POST": + return v2api.Post(ctx, &r) + case "PUT": + return v2api.Put(ctx, &r) + case "DELETE": + return v2api.Delete(ctx, &r) + case "QGET": + return v2api.QGet(ctx, &r) + case "GET": + return v2api.Get(ctx, &r) + case "HEAD": + return v2api.Head(ctx, &r) + } + return Response{}, ErrUnknownMethod +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go new file mode 100644 index 000000000..870f98e06 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -0,0 +1,692 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "bytes" + "encoding/binary" + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/coreos/etcd/auth" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/etcdserver/membership" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/lease/leasehttp" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/raft" + + "golang.org/x/net/context" +) + +const ( + // the max request size that raft accepts. + // TODO: make this a flag? But we probably do not want to + // accept large request which might block raft stream. User + // specify a large value might end up with shooting in the foot. + maxRequestBytes = 1.5 * 1024 * 1024 + + // In the health case, there might be a small gap (10s of entries) between + // the applied index and committed index. + // However, if the committed entries are very heavy to apply, the gap might grow. + // We should stop accepting new proposals if the gap growing to a certain point. 
+ maxGapBetweenApplyAndCommitIndex = 5000 +) + +type RaftKV interface { + Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) + Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) + DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) + Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) +} + +type Lessor interface { + // LeaseGrant sends LeaseGrant request to raft and apply it after committed. + LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) + // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed. + LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) + + // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error + // is returned. + LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) + + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) +} + +type Authenticator interface { + AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) + AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) + Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) + UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) +} + +func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if !r.Serializable { + err := s.linearizableReadNotify(ctx) + if err != nil { + return nil, err + } + } + var resp *pb.RangeResponse + var err error + chk := func(ai *auth.AuthInfo) error { + return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) + } + get := func() { resp, err = s.applyV3Base.Range(nil, r) } + if serr := s.doSerialize(ctx, chk, get); serr != nil { + return nil, serr + } + return resp, err +} + +func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) + if err != nil { + return nil, 
err + } + return resp.(*pb.PutResponse), nil +} + +func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) + if err != nil { + return nil, err + } + return resp.(*pb.DeleteRangeResponse), nil +} + +func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if isTxnReadonly(r) { + if !isTxnSerializable(r) { + err := s.linearizableReadNotify(ctx) + if err != nil { + return nil, err + } + } + var resp *pb.TxnResponse + var err error + chk := func(ai *auth.AuthInfo) error { + return checkTxnAuth(s.authStore, ai, r) + } + get := func() { resp, err = s.applyV3Base.Txn(r) } + if serr := s.doSerialize(ctx, chk, get); serr != nil { + return nil, serr + } + return resp, err + } + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + if err != nil { + return nil, err + } + return resp.(*pb.TxnResponse), nil +} + +func isTxnSerializable(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + return true +} + +func isTxnReadonly(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil { + return false + } + } + return true +} + +func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { + result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) + if r.Physical && result != nil && result.physc != nil { + <-result.physc + // The compaction is done deleting keys; the hash is now settled + // but the data is not necessarily committed. If there's a crash, + // the hash may revert to a hash prior to compaction completing + // if the compaction resumes. Force the finished compaction to + // commit so it won't resume following a crash. + s.be.ForceCommit() + } + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + resp := result.resp.(*pb.CompactionResponse) + if resp == nil { + resp = &pb.CompactionResponse{} + } + if resp.Header == nil { + resp.Header = &pb.ResponseHeader{} + } + resp.Header.Revision = s.kv.Rev() + return resp, nil +} + +func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + // no id given? 
choose one + for r.ID == int64(lease.NoLease) { + // only use positive int64 id's + r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) + } + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) + if err != nil { + return nil, err + } + return resp.(*pb.LeaseGrantResponse), nil +} + +func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) + if err != nil { + return nil, err + } + return resp.(*pb.LeaseRevokeResponse), nil +} + +func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { + ttl, err := s.lessor.Renew(id) + if err == nil { // already requested to primary lessor(leader) + return ttl, nil + } + if err != lease.ErrNotPrimary { + return -1, err + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + // renewals don't go through raft; forward to leader manually + for cctx.Err() == nil && err != nil { + leader, lerr := s.waitLeader(cctx) + if lerr != nil { + return -1, lerr + } + for _, url := range leader.PeerURLs { + lurl := url + leasehttp.LeasePrefix + ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) + if err == nil || err == lease.ErrLeaseNotFound { + return ttl, err + } + } + } + return -1, ErrTimeout +} + +func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { + if s.Leader() == s.ID() { + // primary; timetolive directly from leader + le := s.lessor.Lookup(lease.LeaseID(r.ID)) + if le == nil { + return nil, lease.ErrLeaseNotFound + } + // TODO: fill out ResponseHeader + resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()} + if r.Keys { + ks := le.Keys() + kbs := make([][]byte, len(ks)) + for i := range ks { + kbs[i] = []byte(ks[i]) + } + resp.Keys = kbs + } + return resp, nil + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + // forward to leader + for cctx.Err() == nil { + leader, err := s.waitLeader(cctx) + if err != nil { + return nil, err + } + for _, url := range leader.PeerURLs { + lurl := url + leasehttp.LeaseInternalPrefix + resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt) + if err == nil { + return resp.LeaseTimeToLiveResponse, nil + } + if err == lease.ErrLeaseNotFound { + return nil, err + } + } + } + return nil, ErrTimeout +} + +func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { + leader := s.cluster.Member(s.Leader()) + for leader == nil { + // wait an election + dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond + select { + case <-time.After(dur): + leader = s.cluster.Member(s.Leader()) + case <-s.stopping: + return nil, ErrStopped + case <-ctx.Done(): + return nil, ErrNoLeader + } + } + if leader == nil || len(leader.PeerURLs) == 0 { + return nil, ErrNoLeader + } + return leader, nil +} + +func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AlarmResponse), nil +} + +func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) + if err != 
nil { + return nil, err + } + return resp.(*pb.AuthEnableResponse), nil +} + +func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthDisableResponse), nil +} + +func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { + if err := s.linearizableReadNotify(ctx); err != nil { + return nil, err + } + + var resp proto.Message + for { + checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) + if err != nil { + if err != auth.ErrAuthNotEnabled { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } + return nil, err + } + + st, err := s.AuthStore().GenTokenPrefix() + if err != nil { + return nil, err + } + + internalReq := &pb.InternalAuthenticateRequest{ + Name: r.Name, + Password: r.Password, + SimpleToken: st, + } + + resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) + if err != nil { + return nil, err + } + if checkedRevision == s.AuthStore().Revision() { + break + } + plog.Infof("revision when password checked is obsolete, retrying") + } + + return resp.(*pb.AuthenticateResponse), nil +} + +func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserAddResponse), nil +} + +func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserDeleteResponse), nil +} + +func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserChangePasswordResponse), nil +} + +func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserGrantRoleResponse), nil +} + +func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserGetResponse), nil +} + +func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserListResponse), nil +} + +func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserRevokeRoleResponse), nil +} + +func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := 
s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleAddResponse), nil +} + +func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleGrantPermissionResponse), nil +} + +func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleGetResponse), nil +} + +func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleListResponse), nil +} + +func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleRevokePermissionResponse), nil +} + +func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleDeleteResponse), nil +} + +func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + result, err := s.processInternalRaftRequestOnce(ctx, r) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + return result.resp, nil +} + +func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + for { + resp, err := s.raftRequestOnce(ctx, r) + if err != auth.ErrAuthOldRevision { + return resp, err + } + } +} + +// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. +func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { + for { + ai, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + if ai == nil { + // chk expects non-nil AuthInfo; use empty credentials + ai = &auth.AuthInfo{} + } + if err = chk(ai); err != nil { + if err == auth.ErrAuthOldRevision { + continue + } + return err + } + // fetch response for serialized request + get() + // empty credentials or current auth info means no need to retry + if ai.Revision == 0 || ai.Revision == s.authStore.Revision() { + return nil + } + // avoid TOCTOU error, retry of the request is required. 
+ } +} + +func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { + ai := s.getAppliedIndex() + ci := s.getCommittedIndex() + if ci > ai+maxGapBetweenApplyAndCommitIndex { + return nil, ErrTooManyRequests + } + + r.Header = &pb.RequestHeader{ + ID: s.reqIDGen.Next(), + } + + authInfo, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return nil, err + } + if authInfo != nil { + r.Header.Username = authInfo.Username + r.Header.AuthRevision = authInfo.Revision + } + + data, err := r.Marshal() + if err != nil { + return nil, err + } + + if len(data) > maxRequestBytes { + return nil, ErrRequestTooLarge + } + + id := r.ID + if id == 0 { + id = r.Header.ID + } + ch := s.w.Register(id) + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + start := time.Now() + s.r.Propose(cctx, data) + proposalsPending.Inc() + defer proposalsPending.Dec() + + select { + case x := <-ch: + return x.(*applyResult), nil + case <-cctx.Done(): + proposalsFailed.Inc() + s.w.Trigger(id, nil) // GC wait + return nil, s.parseProposeCtxErr(cctx.Err(), start) + case <-s.done: + return nil, ErrStopped + } +} + +// Watchable returns a watchable interface attached to the etcdserver. +func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } + +func (s *EtcdServer) linearizableReadLoop() { + var rs raft.ReadState + + for { + ctx := make([]byte, 8) + binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next()) + + select { + case <-s.readwaitc: + case <-s.stopping: + return + } + + nextnr := newNotifier() + + s.readMu.Lock() + nr := s.readNotifier + s.readNotifier = nextnr + s.readMu.Unlock() + + cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + if err := s.r.ReadIndex(cctx, ctx); err != nil { + cancel() + if err == raft.ErrStopped { + return + } + plog.Errorf("failed to get read index from raft: %v", err) + nr.notify(err) + continue + } + cancel() + + var ( + timeout bool + done bool + ) + for !timeout && !done { + select { + case rs = <-s.r.readStateC: + done = bytes.Equal(rs.RequestCtx, ctx) + if !done { + // a previous request might time out. now we should ignore the response of it and + // continue waiting for the response of the current requests. 
+ plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx) + } + case <-time.After(s.Cfg.ReqTimeout()): + plog.Warningf("timed out waiting for read index response") + nr.notify(ErrTimeout) + timeout = true + case <-s.stopping: + return + } + } + if !done { + continue + } + + if ai := s.getAppliedIndex(); ai < rs.Index { + select { + case <-s.applyWait.Wait(rs.Index): + case <-s.stopping: + return + } + } + // unblock all l-reads requested at indices before rs.Index + nr.notify(nil) + } +} + +func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { + s.readMu.RLock() + nc := s.readNotifier + s.readMu.RUnlock() + + // signal linearizable loop for current notify if it hasn't been already + select { + case s.readwaitc <- struct{}{}: + default: + } + + // wait for read state notification + select { + case <-nc.c: + return nc.err + case <-ctx.Done(): + return ctx.Err() + case <-s.done: + return ErrStopped + } +} + +func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { + if s.Cfg.ClientCertAuthEnabled { + authInfo := s.AuthStore().AuthInfoFromTLS(ctx) + if authInfo != nil { + return authInfo, nil + } + } + + return s.AuthStore().AuthInfoFromCtx(ctx) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go new file mode 100644 index 000000000..ad5be0308 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/doc.go @@ -0,0 +1,16 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mvcc defines etcd's stable MVCC storage. +package mvcc diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go new file mode 100644 index 000000000..991289cdd --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/index.go @@ -0,0 +1,219 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "sort" + "sync" + + "github.com/google/btree" +) + +type index interface { + Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) + Range(key, end []byte, atRev int64) ([][]byte, []revision) + Put(key []byte, rev revision) + Tombstone(key []byte, rev revision) error + RangeSince(key, end []byte, rev int64) []revision + Compact(rev int64) map[revision]struct{} + Equal(b index) bool + + Insert(ki *keyIndex) + KeyIndex(ki *keyIndex) *keyIndex +} + +type treeIndex struct { + sync.RWMutex + tree *btree.BTree +} + +func newTreeIndex() index { + return &treeIndex{ + tree: btree.New(32), + } +} + +func (ti *treeIndex) Put(key []byte, rev revision) { + keyi := &keyIndex{key: key} + + ti.Lock() + defer ti.Unlock() + item := ti.tree.Get(keyi) + if item == nil { + keyi.put(rev.main, rev.sub) + ti.tree.ReplaceOrInsert(keyi) + return + } + okeyi := item.(*keyIndex) + okeyi.put(rev.main, rev.sub) +} + +func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { + keyi := &keyIndex{key: key} + ti.RLock() + defer ti.RUnlock() + if keyi = ti.keyIndex(keyi); keyi == nil { + return revision{}, revision{}, 0, ErrRevisionNotFound + } + return keyi.get(atRev) +} + +func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { + ti.RLock() + defer ti.RUnlock() + return ti.keyIndex(keyi) +} + +func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { + if item := ti.tree.Get(keyi); item != nil { + return item.(*keyIndex) + } + return nil +} + +func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { + if end == nil { + rev, _, _, err := ti.Get(key, atRev) + if err != nil { + return nil, nil + } + return [][]byte{key}, []revision{rev} + } + + keyi := &keyIndex{key: key} + endi := &keyIndex{key: end} + + ti.RLock() + defer ti.RUnlock() + + ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { + if len(endi.key) > 0 && !item.Less(endi) { + return false + } + curKeyi := item.(*keyIndex) + rev, _, _, err := curKeyi.get(atRev) + if err != nil { + return true + } + revs = append(revs, rev) + keys = append(keys, curKeyi.key) + return true + }) + + return keys, revs +} + +func (ti *treeIndex) Tombstone(key []byte, rev revision) error { + keyi := &keyIndex{key: key} + + ti.Lock() + defer ti.Unlock() + item := ti.tree.Get(keyi) + if item == nil { + return ErrRevisionNotFound + } + + ki := item.(*keyIndex) + return ki.tombstone(rev.main, rev.sub) +} + +// RangeSince returns all revisions from key(including) to end(excluding) +// at or after the given rev. The returned slice is sorted in the order +// of revision. +func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { + ti.RLock() + defer ti.RUnlock() + + keyi := &keyIndex{key: key} + if end == nil { + item := ti.tree.Get(keyi) + if item == nil { + return nil + } + keyi = item.(*keyIndex) + return keyi.since(rev) + } + + endi := &keyIndex{key: end} + var revs []revision + ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { + if len(endi.key) > 0 && !item.Less(endi) { + return false + } + curKeyi := item.(*keyIndex) + revs = append(revs, curKeyi.since(rev)...) + return true + }) + sort.Sort(revisions(revs)) + + return revs +} + +func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { + available := make(map[revision]struct{}) + var emptyki []*keyIndex + plog.Printf("store.index: compact %d", rev) + // TODO: do not hold the lock for long time? + // This is probably OK. Compacting 10M keys takes O(10ms). 
+ ti.Lock() + defer ti.Unlock() + ti.tree.Ascend(compactIndex(rev, available, &emptyki)) + for _, ki := range emptyki { + item := ti.tree.Delete(ki) + if item == nil { + plog.Panic("store.index: unexpected delete failure during compaction") + } + } + return available +} + +func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool { + return func(i btree.Item) bool { + keyi := i.(*keyIndex) + keyi.compact(rev, available) + if keyi.isEmpty() { + *emptyki = append(*emptyki, keyi) + } + return true + } +} + +func (a *treeIndex) Equal(bi index) bool { + b := bi.(*treeIndex) + + if a.tree.Len() != b.tree.Len() { + return false + } + + equal := true + + a.tree.Ascend(func(item btree.Item) bool { + aki := item.(*keyIndex) + bki := b.tree.Get(item).(*keyIndex) + if !aki.equal(bki) { + equal = false + return false + } + return true + }) + + return equal +} + +func (ti *treeIndex) Insert(ki *keyIndex) { + ti.Lock() + defer ti.Unlock() + ti.tree.ReplaceOrInsert(ki) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go new file mode 100644 index 000000000..9104f9b2d --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go @@ -0,0 +1,332 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "bytes" + "errors" + "fmt" + + "github.com/google/btree" +) + +var ( + ErrRevisionNotFound = errors.New("mvcc: revision not found") +) + +// keyIndex stores the revisions of a key in the backend. +// Each keyIndex has at least one key generation. +// Each generation might have several key versions. +// Tombstone on a key appends an tombstone version at the end +// of the current generation and creates a new empty generation. +// Each version of a key has an index pointing to the backend. +// +// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo" +// generate a keyIndex: +// key: "foo" +// rev: 5 +// generations: +// {empty} +// {4.0, 5.0(t)} +// {1.0, 2.0, 3.0(t)} +// +// Compact a keyIndex removes the versions with smaller or equal to +// rev except the largest one. If the generation becomes empty +// during compaction, it will be removed. if all the generations get +// removed, the keyIndex should be removed. + +// For example: +// compact(2) on the previous example +// generations: +// {empty} +// {4.0, 5.0(t)} +// {2.0, 3.0(t)} +// +// compact(4) +// generations: +// {empty} +// {4.0, 5.0(t)} +// +// compact(5): +// generations: +// {empty} -> key SHOULD be removed. +// +// compact(6): +// generations: +// {empty} -> key SHOULD be removed. +type keyIndex struct { + key []byte + modified revision // the main rev of the last modification + generations []generation +} + +// put puts a revision to the keyIndex. 
+func (ki *keyIndex) put(main int64, sub int64) { + rev := revision{main: main, sub: sub} + + if !rev.GreaterThan(ki.modified) { + plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) + } + if len(ki.generations) == 0 { + ki.generations = append(ki.generations, generation{}) + } + g := &ki.generations[len(ki.generations)-1] + if len(g.revs) == 0 { // create a new key + keysGauge.Inc() + g.created = rev + } + g.revs = append(g.revs, rev) + g.ver++ + ki.modified = rev +} + +func (ki *keyIndex) restore(created, modified revision, ver int64) { + if len(ki.generations) != 0 { + plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") + } + + ki.modified = modified + g := generation{created: created, ver: ver, revs: []revision{modified}} + ki.generations = append(ki.generations, g) + keysGauge.Inc() +} + +// tombstone puts a revision, pointing to a tombstone, to the keyIndex. +// It also creates a new empty generation in the keyIndex. +// It returns ErrRevisionNotFound when tombstone on an empty generation. +func (ki *keyIndex) tombstone(main int64, sub int64) error { + if ki.isEmpty() { + plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) + } + if ki.generations[len(ki.generations)-1].isEmpty() { + return ErrRevisionNotFound + } + ki.put(main, sub) + ki.generations = append(ki.generations, generation{}) + keysGauge.Dec() + return nil +} + +// get gets the modified, created revision and version of the key that satisfies the given atRev. +// Rev must be higher than or equal to the given atRev. +func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { + if ki.isEmpty() { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } + g := ki.findGeneration(atRev) + if g.isEmpty() { + return revision{}, revision{}, 0, ErrRevisionNotFound + } + + n := g.walk(func(rev revision) bool { return rev.main > atRev }) + if n != -1 { + return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil + } + + return revision{}, revision{}, 0, ErrRevisionNotFound +} + +// since returns revisions since the given rev. Only the revision with the +// largest sub revision will be returned if multiple revisions have the same +// main revision. +func (ki *keyIndex) since(rev int64) []revision { + if ki.isEmpty() { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } + since := revision{rev, 0} + var gi int + // find the generations to start checking + for gi = len(ki.generations) - 1; gi > 0; gi-- { + g := ki.generations[gi] + if g.isEmpty() { + continue + } + if since.GreaterThan(g.created) { + break + } + } + + var revs []revision + var last int64 + for ; gi < len(ki.generations); gi++ { + for _, r := range ki.generations[gi].revs { + if since.GreaterThan(r) { + continue + } + if r.main == last { + // replace the revision with a new one that has higher sub value, + // because the original one should not be seen by external + revs[len(revs)-1] = r + continue + } + revs = append(revs, r) + last = r.main + } + } + return revs +} + +// compact compacts a keyIndex by removing the versions with smaller or equal +// revision than the given atRev except the largest one (If the largest one is +// a tombstone, it will not be kept). +// If a generation becomes empty during compaction, it will be removed. 
+func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { + if ki.isEmpty() { + plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) + } + + // walk until reaching the first revision that has an revision smaller or equal to + // the atRev. + // add it to the available map + f := func(rev revision) bool { + if rev.main <= atRev { + available[rev] = struct{}{} + return false + } + return true + } + + i, g := 0, &ki.generations[0] + // find first generation includes atRev or created after atRev + for i < len(ki.generations)-1 { + if tomb := g.revs[len(g.revs)-1].main; tomb > atRev { + break + } + i++ + g = &ki.generations[i] + } + + if !g.isEmpty() { + n := g.walk(f) + // remove the previous contents. + if n != -1 { + g.revs = g.revs[n:] + } + // remove any tombstone + if len(g.revs) == 1 && i != len(ki.generations)-1 { + delete(available, g.revs[0]) + i++ + } + } + // remove the previous generations. + ki.generations = ki.generations[i:] +} + +func (ki *keyIndex) isEmpty() bool { + return len(ki.generations) == 1 && ki.generations[0].isEmpty() +} + +// findGeneration finds out the generation of the keyIndex that the +// given rev belongs to. If the given rev is at the gap of two generations, +// which means that the key does not exist at the given rev, it returns nil. +func (ki *keyIndex) findGeneration(rev int64) *generation { + lastg := len(ki.generations) - 1 + cg := lastg + + for cg >= 0 { + if len(ki.generations[cg].revs) == 0 { + cg-- + continue + } + g := ki.generations[cg] + if cg != lastg { + if tomb := g.revs[len(g.revs)-1].main; tomb <= rev { + return nil + } + } + if g.revs[0].main <= rev { + return &ki.generations[cg] + } + cg-- + } + return nil +} + +func (a *keyIndex) Less(b btree.Item) bool { + return bytes.Compare(a.key, b.(*keyIndex).key) == -1 +} + +func (a *keyIndex) equal(b *keyIndex) bool { + if !bytes.Equal(a.key, b.key) { + return false + } + if a.modified != b.modified { + return false + } + if len(a.generations) != len(b.generations) { + return false + } + for i := range a.generations { + ag, bg := a.generations[i], b.generations[i] + if !ag.equal(bg) { + return false + } + } + return true +} + +func (ki *keyIndex) String() string { + var s string + for _, g := range ki.generations { + s += g.String() + } + return s +} + +// generation contains multiple revisions of a key. +type generation struct { + ver int64 + created revision // when the generation is created (put in first revision). + revs []revision +} + +func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } + +// walk walks through the revisions in the generation in descending order. +// It passes the revision to the given function. +// walk returns until: 1. it finishes walking all pairs 2. the function returns false. +// walk returns the position at where it stopped. If it stopped after +// finishing walking, -1 will be returned. 
+func (g *generation) walk(f func(rev revision) bool) int { + l := len(g.revs) + for i := range g.revs { + ok := f(g.revs[l-i-1]) + if !ok { + return l - i - 1 + } + } + return -1 +} + +func (g *generation) String() string { + return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) +} + +func (a generation) equal(b generation) bool { + if a.ver != b.ver { + return false + } + if len(a.revs) != len(b.revs) { + return false + } + + for i := range a.revs { + ar, br := a.revs[i], b.revs[i] + if ar != br { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go new file mode 100644 index 000000000..6636347aa --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kv.go @@ -0,0 +1,147 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type RangeOptions struct { + Limit int64 + Rev int64 + Count bool +} + +type RangeResult struct { + KVs []mvccpb.KeyValue + Rev int64 + Count int +} + +type ReadView interface { + // FirstRev returns the first KV revision at the time of opening the txn. + // After a compaction, the first revision increases to the compaction + // revision. + FirstRev() int64 + + // Rev returns the revision of the KV at the time of opening the txn. + Rev() int64 + + // Range gets the keys in the range at rangeRev. + // The returned rev is the current revision of the KV when the operation is executed. + // If rangeRev <=0, range gets the keys at currentRev. + // If `end` is nil, the request returns the key. + // If `end` is not nil and not empty, it gets the keys in range [key, range_end). + // If `end` is not nil and empty, it gets the keys greater than or equal to key. + // Limit limits the number of keys returned. + // If the required rev is compacted, ErrCompacted will be returned. + Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) +} + +// TxnRead represents a read-only transaction with operations that will not +// block other read transactions. +type TxnRead interface { + ReadView + // End marks the transaction is complete and ready to commit. + End() +} + +type WriteView interface { + // DeleteRange deletes the given range from the store. + // A deleteRange increases the rev of the store if any key in the range exists. + // The number of key deleted will be returned. + // The returned rev is the current revision of the KV when the operation is executed. + // It also generates one event for each key delete in the event history. + // if the `end` is nil, deleteRange deletes the key. + // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). + DeleteRange(key, end []byte) (n, rev int64) + + // Put puts the given key, value into the store. Put also takes additional argument lease to + // attach a lease to a key-value pair as meta-data. 
KV implementation does not validate the lease + // id. + // A put also increases the rev of the store, and generates one event in the event history. + // The returned rev is the current revision of the KV when the operation is executed. + Put(key, value []byte, lease lease.LeaseID) (rev int64) +} + +// TxnWrite represents a transaction that can modify the store. +type TxnWrite interface { + TxnRead + WriteView + // Changes gets the changes made since opening the write txn. + Changes() []mvccpb.KeyValue +} + +// txnReadWrite coerces a read txn to a write, panicking on any write operation. +type txnReadWrite struct{ TxnRead } + +func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } +func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + panic("unexpected Put") +} +func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } + +func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } + +type KV interface { + ReadView + WriteView + + // Read creates a read transaction. + Read() TxnRead + + // Write creates a write transaction. + Write() TxnWrite + + // Hash retrieves the hash of KV state and revision. + // This method is designed for consistency checking purposes. + Hash() (hash uint32, revision int64, err error) + + // Compact frees all superseded keys with revisions less than rev. + Compact(rev int64) (<-chan struct{}, error) + + // Commit commits outstanding txns into the underlying backend. + Commit() + + // Restore restores the KV store from a backend. + Restore(b backend.Backend) error + Close() error +} + +// WatchableKV is a KV that can be watched. +type WatchableKV interface { + KV + Watchable +} + +// Watchable is the interface that wraps the NewWatchStream function. +type Watchable interface { + // NewWatchStream returns a WatchStream that can be used to + // watch events happened or happening on the KV. + NewWatchStream() WatchStream +} + +// ConsistentWatchableKV is a WatchableKV that understands the consistency +// algorithm and consistent index. +// If the consistent index of executing entry is not larger than the +// consistent index of ConsistentWatchableKV, all operations in +// this entry are skipped and return empty response. +type ConsistentWatchableKV interface { + WatchableKV + // ConsistentIndex returns the current consistent index of the KV. + ConsistentIndex() uint64 +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go new file mode 100644 index 000000000..f40ba8edc --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kv_view.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
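// An illustrative, self-contained sketch of how a caller is expected to use
// the TxnRead contract defined above: open a read txn, always End it, and
// treat the results as a consistent snapshot. The types below are simplified
// stand-ins, not the real mvcc package.
package main

import "fmt"

type rangeResult struct {
	KVs   []string
	Rev   int64
	Count int
}

type txnRead interface {
	Range(key, end []byte) (*rangeResult, error)
	Rev() int64
	End()
}

// fakeTxn is a stand-in backend so the example runs on its own.
type fakeTxn struct{ rev int64 }

func (t *fakeTxn) Range(key, end []byte) (*rangeResult, error) {
	return &rangeResult{KVs: []string{string(key)}, Rev: t.rev, Count: 1}, nil
}
func (t *fakeTxn) Rev() int64 { return t.rev }
func (t *fakeTxn) End()       {}

func readOnce(txn txnRead, key []byte) {
	defer txn.End() // a read txn must always be closed so writers can proceed
	r, err := txn.Range(key, nil)
	if err != nil {
		fmt.Println("range failed:", err)
		return
	}
	fmt.Printf("rev=%d count=%d kvs=%v\n", r.Rev, r.Count, r.KVs)
}

func main() {
	readOnce(&fakeTxn{rev: 7}, []byte("foo"))
}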
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type readView struct{ kv KV } + +func (rv *readView) FirstRev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.FirstRev() +} + +func (rv *readView) Rev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.Rev() +} + +func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + tr := rv.kv.Read() + defer tr.End() + return tr.Range(key, end, ro) +} + +type writeView struct{ kv KV } + +func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.DeleteRange(key, end) +} + +func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.Put(key, value, lease) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go new file mode 100644 index 000000000..28a508ccb --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go @@ -0,0 +1,459 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "encoding/binary" + "errors" + "math" + "sync" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" + "github.com/coreos/etcd/pkg/schedule" + "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" +) + +var ( + keyBucketName = []byte("key") + metaBucketName = []byte("meta") + + consistentIndexKeyName = []byte("consistent_index") + scheduledCompactKeyName = []byte("scheduledCompactRev") + finishedCompactKeyName = []byte("finishedCompactRev") + + ErrCompacted = errors.New("mvcc: required revision has been compacted") + ErrFutureRev = errors.New("mvcc: required revision is a future revision") + ErrCanceled = errors.New("mvcc: watcher is canceled") + ErrClosed = errors.New("mvcc: closed") + + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") +) + +const ( + // markedRevBytesLen is the byte length of marked revision. + // The first `revBytesLen` bytes represents a normal revision. The last + // one byte is the mark. + markedRevBytesLen = revBytesLen + 1 + markBytePosition = markedRevBytesLen - 1 + markTombstone byte = 't' +) + +var restoreChunkKeys = 10000 // non-const for testing + +// ConsistentIndexGetter is an interface that wraps the Get method. +// Consistent index is the offset of an entry in a consistent replicated log. +type ConsistentIndexGetter interface { + // ConsistentIndex returns the consistent index of current executing entry. + ConsistentIndex() uint64 +} + +type store struct { + ReadView + WriteView + + // mu read locks for txns and write locks for non-txn store changes. + mu sync.RWMutex + + ig ConsistentIndexGetter + + b backend.Backend + kvindex index + + le lease.Lessor + + // revMuLock protects currentRev and compactMainRev. + // Locked at end of write txn and released after write txn unlock lock. 
+ // Locked before locking read txn and released after locking. + revMu sync.RWMutex + // currentRev is the revision of the last completed transaction. + currentRev int64 + // compactMainRev is the main revision of the last compaction. + compactMainRev int64 + + // bytesBuf8 is a byte slice of length 8 + // to avoid a repetitive allocation in saveIndex. + bytesBuf8 []byte + + fifoSched schedule.Scheduler + + stopc chan struct{} +} + +// NewStore returns a new store. It is useful to create a store inside +// mvcc pkg. It should only be used for testing externally. +func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store { + s := &store{ + b: b, + ig: ig, + kvindex: newTreeIndex(), + + le: le, + + currentRev: 1, + compactMainRev: -1, + + bytesBuf8: make([]byte, 8), + fifoSched: schedule.NewFIFOScheduler(), + + stopc: make(chan struct{}), + } + s.ReadView = &readView{s} + s.WriteView = &writeView{s} + if s.le != nil { + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) + } + + tx := s.b.BatchTx() + tx.Lock() + tx.UnsafeCreateBucket(keyBucketName) + tx.UnsafeCreateBucket(metaBucketName) + tx.Unlock() + s.b.ForceCommit() + + if err := s.restore(); err != nil { + // TODO: return the error instead of panic here? + panic("failed to recover store from backend") + } + + return s +} + +func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { + if ctx == nil || ctx.Err() != nil { + s.mu.Lock() + select { + case <-s.stopc: + default: + f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } + s.fifoSched.Schedule(f) + } + s.mu.Unlock() + return + } + close(ch) +} + +func (s *store) Hash() (hash uint32, revision int64, err error) { + s.b.ForceCommit() + h, err := s.b.Hash(DefaultIgnores) + return h, s.currentRev, err +} + +func (s *store) Compact(rev int64) (<-chan struct{}, error) { + s.mu.Lock() + defer s.mu.Unlock() + s.revMu.Lock() + defer s.revMu.Unlock() + + if rev <= s.compactMainRev { + ch := make(chan struct{}) + f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } + s.fifoSched.Schedule(f) + return ch, ErrCompacted + } + if rev > s.currentRev { + return nil, ErrFutureRev + } + + start := time.Now() + + s.compactMainRev = rev + + rbytes := newRevBytes() + revToBytes(revision{main: rev}, rbytes) + + tx := s.b.BatchTx() + tx.Lock() + tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes) + tx.Unlock() + // ensure that desired compaction is persisted + s.b.ForceCommit() + + keep := s.kvindex.Compact(rev) + ch := make(chan struct{}) + var j = func(ctx context.Context) { + if ctx.Err() != nil { + s.compactBarrier(ctx, ch) + return + } + if !s.scheduleCompaction(rev, keep) { + s.compactBarrier(nil, ch) + return + } + close(ch) + } + + s.fifoSched.Schedule(j) + + indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) + return ch, nil +} + +// DefaultIgnores is a map of keys to ignore in hash checking. +var DefaultIgnores map[backend.IgnoreKey]struct{} + +func init() { + DefaultIgnores = map[backend.IgnoreKey]struct{}{ + // consistent index might be changed due to v2 internal sync, which + // is not controllable by the user. 
+ {Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {}, + } +} + +func (s *store) Commit() { + s.mu.Lock() + defer s.mu.Unlock() + + tx := s.b.BatchTx() + tx.Lock() + s.saveIndex(tx) + tx.Unlock() + s.b.ForceCommit() +} + +func (s *store) Restore(b backend.Backend) error { + s.mu.Lock() + defer s.mu.Unlock() + + close(s.stopc) + s.fifoSched.Stop() + + s.b = b + s.kvindex = newTreeIndex() + s.currentRev = 1 + s.compactMainRev = -1 + s.fifoSched = schedule.NewFIFOScheduler() + s.stopc = make(chan struct{}) + + return s.restore() +} + +func (s *store) restore() error { + reportDbTotalSizeInBytesMu.Lock() + b := s.b + reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } + reportDbTotalSizeInBytesMu.Unlock() + + min, max := newRevBytes(), newRevBytes() + revToBytes(revision{main: 1}, min) + revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) + + keyToLease := make(map[string]lease.LeaseID) + + // restore index + tx := s.b.BatchTx() + tx.Lock() + + _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) + if len(finishedCompactBytes) != 0 { + s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main + plog.Printf("restore compact to %d", s.compactMainRev) + } + _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) + scheduledCompact := int64(0) + if len(scheduledCompactBytes) != 0 { + scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main + } + + // index keys concurrently as they're loaded in from tx + keysGauge.Set(0) + rkvc, revc := restoreIntoIndex(s.kvindex) + for { + keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) + if len(keys) == 0 { + break + } + // rkvc blocks if the total pending keys exceeds the restore + // chunk size to keep keys from consuming too much memory. + restoreChunk(rkvc, keys, vals, keyToLease) + if len(keys) < restoreChunkKeys { + // partial set implies final set + break + } + // next set begins after where this one ended + newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) + newMin.sub++ + revToBytes(newMin, min) + } + close(rkvc) + s.currentRev = <-revc + + // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. + // the correct revision should be set to compaction revision in the case, not the largest revision + // we have seen. + if s.currentRev < s.compactMainRev { + s.currentRev = s.compactMainRev + } + if scheduledCompact <= s.compactMainRev { + scheduledCompact = 0 + } + + for key, lid := range keyToLease { + if s.le == nil { + panic("no lessor to attach lease") + } + err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) + if err != nil { + plog.Errorf("unexpected Attach error: %v", err) + } + } + + tx.Unlock() + + if scheduledCompact != 0 { + s.Compact(scheduledCompact) + plog.Printf("resume scheduled compaction at %d", scheduledCompact) + } + + return nil +} + +type revKeyValue struct { + key []byte + kv mvccpb.KeyValue + kstr string +} + +func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { + rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) + go func() { + currentRev := int64(1) + defer func() { revc <- currentRev }() + // restore the tree index from streaming the unordered index. 
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys) + for rkv := range rkvc { + ki, ok := kiCache[rkv.kstr] + // purge kiCache if many keys but still missing in the cache + if !ok && len(kiCache) >= restoreChunkKeys { + i := 10 + for k := range kiCache { + delete(kiCache, k) + if i--; i == 0 { + break + } + } + } + // cache miss, fetch from tree index if there + if !ok { + ki = &keyIndex{key: rkv.kv.Key} + if idxKey := idx.KeyIndex(ki); idxKey != nil { + kiCache[rkv.kstr], ki = idxKey, idxKey + ok = true + } + } + rev := bytesToRev(rkv.key) + currentRev = rev.main + if ok { + if isTombstone(rkv.key) { + ki.tombstone(rev.main, rev.sub) + continue + } + ki.put(rev.main, rev.sub) + } else if !isTombstone(rkv.key) { + ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + idx.Insert(ki) + kiCache[rkv.kstr] = ki + } + } + }() + return rkvc, revc +} + +func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { + for i, key := range keys { + rkv := revKeyValue{key: key} + if err := rkv.kv.Unmarshal(vals[i]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + rkv.kstr = string(rkv.kv.Key) + if isTombstone(key) { + delete(keyToLease, rkv.kstr) + } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { + keyToLease[rkv.kstr] = lid + } else { + delete(keyToLease, rkv.kstr) + } + kvc <- rkv + } +} + +func (s *store) Close() error { + close(s.stopc) + s.fifoSched.Stop() + return nil +} + +func (a *store) Equal(b *store) bool { + if a.currentRev != b.currentRev { + return false + } + if a.compactMainRev != b.compactMainRev { + return false + } + return a.kvindex.Equal(b.kvindex) +} + +func (s *store) saveIndex(tx backend.BatchTx) { + if s.ig == nil { + return + } + bs := s.bytesBuf8 + binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) + // put the index into the underlying backend + // tx has been locked in TxnBegin, so there is no need to lock it again + tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) +} + +func (s *store) ConsistentIndex() uint64 { + // TODO: cache index in a uint64 field? + tx := s.b.BatchTx() + tx.Lock() + defer tx.Unlock() + _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) + if len(vs) == 0 { + return 0 + } + return binary.BigEndian.Uint64(vs[0]) +} + +// appendMarkTombstone appends tombstone mark to normal revision bytes. +func appendMarkTombstone(b []byte) []byte { + if len(b) != revBytesLen { + plog.Panicf("cannot append mark to non normal revision bytes") + } + return append(b, markTombstone) +} + +// isTombstone checks whether the revision bytes is a tombstone. +func isTombstone(b []byte) bool { + return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone +} + +// revBytesRange returns the range of revision bytes at +// the given revision. +func revBytesRange(rev revision) (start, end []byte) { + start = newRevBytes() + revToBytes(rev, start) + + end = newRevBytes() + endRev := revision{main: rev.main, sub: rev.sub + 1} + revToBytes(endRev, end) + + return start, end +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go new file mode 100644 index 000000000..bbd38f547 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go @@ -0,0 +1,66 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "encoding/binary" + "time" +) + +func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { + totalStart := time.Now() + defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) + + end := make([]byte, 8) + binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) + + batchsize := int64(10000) + last := make([]byte, 8+1+8) + for { + var rev revision + + start := time.Now() + tx := s.b.BatchTx() + tx.Lock() + + keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize) + for _, key := range keys { + rev = bytesToRev(key) + if _, ok := keep[rev]; !ok { + tx.UnsafeDelete(keyBucketName, key) + } + } + + if len(keys) < int(batchsize) { + rbytes := make([]byte, 8+1+8) + revToBytes(revision{main: compactMainRev}, rbytes) + tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes) + tx.Unlock() + plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) + return true + } + + // update last + revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) + tx.Unlock() + dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) + + select { + case <-time.After(100 * time.Millisecond): + case <-s.stopc: + return false + } + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go new file mode 100644 index 000000000..13d4d530d --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go @@ -0,0 +1,253 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
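// A hedged sketch of the batch-and-pause pattern that scheduleCompaction
// above uses: delete at most batchSize keys per locked pass, then release the
// lock and yield briefly so normal traffic is not starved. The types and
// names here are simplified stand-ins, not the real backend API.
package main

import (
	"fmt"
	"time"
)

func compactInBatches(keys []string, keep map[string]bool, batchSize int, stop <-chan struct{}) bool {
	for start := 0; start < len(keys); start += batchSize {
		end := start + batchSize
		if end > len(keys) {
			end = len(keys)
		}
		// In the real store this span runs under the batch tx lock.
		for _, k := range keys[start:end] {
			if !keep[k] {
				fmt.Println("delete", k)
			}
		}
		if end == len(keys) {
			return true // finished compaction
		}
		// Yield between batches so readers and writers can make progress.
		select {
		case <-time.After(10 * time.Millisecond):
		case <-stop:
			return false
		}
	}
	return true
}

func main() {
	stop := make(chan struct{})
	done := compactInBatches([]string{"a", "b", "c", "d", "e"}, map[string]bool{"e": true}, 2, stop)
	fmt.Println("finished:", done)
}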
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type storeTxnRead struct { + s *store + tx backend.ReadTx + + firstRev int64 + rev int64 +} + +func (s *store) Read() TxnRead { + s.mu.RLock() + tx := s.b.ReadTx() + s.revMu.RLock() + tx.Lock() + firstRev, rev := s.compactMainRev, s.currentRev + s.revMu.RUnlock() + return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) +} + +func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } +func (tr *storeTxnRead) Rev() int64 { return tr.rev } + +func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + return tr.rangeKeys(key, end, tr.Rev(), ro) +} + +func (tr *storeTxnRead) End() { + tr.tx.Unlock() + tr.s.mu.RUnlock() +} + +type storeTxnWrite struct { + *storeTxnRead + tx backend.BatchTx + // beginRev is the revision where the txn begins; it will write to the next revision. + beginRev int64 + changes []mvccpb.KeyValue +} + +func (s *store) Write() TxnWrite { + s.mu.RLock() + tx := s.b.BatchTx() + tx.Lock() + tw := &storeTxnWrite{ + storeTxnRead: &storeTxnRead{s, tx, 0, 0}, + tx: tx, + beginRev: s.currentRev, + changes: make([]mvccpb.KeyValue, 0, 4), + } + return newMetricsTxnWrite(tw) +} + +func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } + +func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + rev := tw.beginRev + if len(tw.changes) > 0 { + rev++ + } + return tw.rangeKeys(key, end, rev, ro) +} + +func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { + if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { + return n, int64(tw.beginRev + 1) + } + return 0, int64(tw.beginRev) +} + +func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { + tw.put(key, value, lease) + return int64(tw.beginRev + 1) +} + +func (tw *storeTxnWrite) End() { + // only update index if the txn modifies the mvcc state. + if len(tw.changes) != 0 { + tw.s.saveIndex(tw.tx) + // hold revMu lock to prevent new read txns from opening until writeback. 
+ tw.s.revMu.Lock() + tw.s.currentRev++ + } + tw.tx.Unlock() + if len(tw.changes) != 0 { + tw.s.revMu.Unlock() + } + tw.s.mu.RUnlock() +} + +func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { + rev := ro.Rev + if rev > curRev { + return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev + } + if rev <= 0 { + rev = curRev + } + if rev < tr.s.compactMainRev { + return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted + } + + _, revpairs := tr.s.kvindex.Range(key, end, int64(rev)) + if len(revpairs) == 0 { + return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil + } + if ro.Count { + return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil + } + + var kvs []mvccpb.KeyValue + for _, revpair := range revpairs { + start, end := revBytesRange(revpair) + _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) + if len(vs) != 1 { + plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + } + + var kv mvccpb.KeyValue + if err := kv.Unmarshal(vs[0]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + kvs = append(kvs, kv) + if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { + break + } + } + return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil +} + +func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { + rev := tw.beginRev + 1 + c := rev + oldLease := lease.NoLease + + // if the key exists before, use its previous created and + // get its previous leaseID + _, created, ver, err := tw.s.kvindex.Get(key, rev) + if err == nil { + c = created.main + oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) + } + + ibytes := newRevBytes() + idxRev := revision{main: rev, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + + ver = ver + 1 + kv := mvccpb.KeyValue{ + Key: key, + Value: value, + CreateRevision: c, + ModRevision: rev, + Version: ver, + Lease: int64(leaseID), + } + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + tw.s.kvindex.Put(key, idxRev) + tw.changes = append(tw.changes, kv) + + if oldLease != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to detach lease") + } + err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + plog.Errorf("unexpected error from lease detach: %v", err) + } + } + if leaseID != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to attach lease") + } + err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + panic("unexpected error from lease Attach") + } + } +} + +func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { + rrev := tw.beginRev + if len(tw.changes) > 0 { + rrev += 1 + } + keys, revs := tw.s.kvindex.Range(key, end, rrev) + if len(keys) == 0 { + return 0 + } + for i, key := range keys { + tw.delete(key, revs[i]) + } + return int64(len(keys)) +} + +func (tw *storeTxnWrite) delete(key []byte, rev revision) { + ibytes := newRevBytes() + idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + ibytes = appendMarkTombstone(ibytes) + + kv := mvccpb.KeyValue{Key: key} + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + err = tw.s.kvindex.Tombstone(key, idxRev) + if err != nil { + plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + } + tw.changes = 
append(tw.changes, kv) + + item := lease.LeaseItem{Key: string(key)} + leaseID := tw.s.le.GetLease(item) + + if leaseID != lease.NoLease { + err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) + if err != nil { + plog.Errorf("cannot detach %v", err) + } + } +} + +func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go new file mode 100644 index 000000000..a65fe59b9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go @@ -0,0 +1,174 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + rangeCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "range_total", + Help: "Total number of ranges seen by this member.", + }) + + putCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "put_total", + Help: "Total number of puts seen by this member.", + }) + + deleteCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "delete_total", + Help: "Total number of deletes seen by this member.", + }) + + txnCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "txn_total", + Help: "Total number of txns seen by this member.", + }) + + keysGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "keys_total", + Help: "Total number of keys.", + }) + + watchStreamGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "watch_stream_total", + Help: "Total number of watch streams.", + }) + + watcherGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "watcher_total", + Help: "Total number of watchers.", + }) + + slowWatcherGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "slow_watcher_total", + Help: "Total number of unsynced slow watchers.", + }) + + totalEventsCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "events_total", + Help: "Total number of events sent by this member.", + }) + + pendingEventsGauge = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "pending_events_total", + Help: "Total number of pending events to be sent.", + }) + + indexCompactionPauseDurations = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "index_compaction_pause_duration_milliseconds", + Help: "Bucketed histogram of index compaction pause duration.", + // 0.5ms -> 1second + 
Buckets: prometheus.ExponentialBuckets(0.5, 2, 12), + }) + + dbCompactionPauseDurations = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_compaction_pause_duration_milliseconds", + Help: "Bucketed histogram of db compaction pause duration.", + // 1ms -> 4second + Buckets: prometheus.ExponentialBuckets(1, 2, 13), + }) + + dbCompactionTotalDurations = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_compaction_total_duration_milliseconds", + Help: "Bucketed histogram of db compaction total duration.", + // 100ms -> 800second + Buckets: prometheus.ExponentialBuckets(100, 2, 14), + }) + + dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_total_size_in_bytes", + Help: "Total size of the underlying database in bytes.", + }, + func() float64 { + reportDbTotalSizeInBytesMu.RLock() + defer reportDbTotalSizeInBytesMu.RUnlock() + return reportDbTotalSizeInBytes() + }, + ) + // overridden by mvcc initialization + reportDbTotalSizeInBytesMu sync.RWMutex + reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } +) + +func init() { + prometheus.MustRegister(rangeCounter) + prometheus.MustRegister(putCounter) + prometheus.MustRegister(deleteCounter) + prometheus.MustRegister(txnCounter) + prometheus.MustRegister(keysGauge) + prometheus.MustRegister(watchStreamGauge) + prometheus.MustRegister(watcherGauge) + prometheus.MustRegister(slowWatcherGauge) + prometheus.MustRegister(totalEventsCounter) + prometheus.MustRegister(pendingEventsGauge) + prometheus.MustRegister(indexCompactionPauseDurations) + prometheus.MustRegister(dbCompactionPauseDurations) + prometheus.MustRegister(dbCompactionTotalDurations) + prometheus.MustRegister(dbTotalSize) +} + +// ReportEventReceived reports that an event is received. +// This function should be called when the external systems received an +// event from mvcc.Watcher. +func ReportEventReceived(n int) { + pendingEventsGauge.Sub(float64(n)) + totalEventsCounter.Add(float64(n)) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go new file mode 100644 index 000000000..fd2144279 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
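// An illustration of how pause-duration histograms like the ones registered
// above are fed: durations are converted to milliseconds before Observe,
// matching the "_milliseconds" metric names. The metric name here is made up
// for the example; only the bucket shape mirrors indexCompactionPauseDurations.
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var examplePauseMs = prometheus.NewHistogram(prometheus.HistogramOpts{
	Namespace: "etcd_debugging",
	Subsystem: "mvcc",
	Name:      "example_pause_duration_milliseconds", // illustrative name only
	Help:      "Bucketed histogram of an example pause duration.",
	// 0.5ms -> ~1s, same shape as the index compaction pause histogram.
	Buckets: prometheus.ExponentialBuckets(0.5, 2, 12),
})

func main() {
	prometheus.MustRegister(examplePauseMs)

	start := time.Now()
	time.Sleep(3 * time.Millisecond) // stand-in for the paused work
	examplePauseMs.Observe(float64(time.Since(start) / time.Millisecond))
}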
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type metricsTxnWrite struct { + TxnWrite + ranges uint + puts uint + deletes uint +} + +func newMetricsTxnRead(tr TxnRead) TxnRead { + return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} +} + +func newMetricsTxnWrite(tw TxnWrite) TxnWrite { + return &metricsTxnWrite{tw, 0, 0, 0} +} + +func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { + tw.ranges++ + return tw.TxnWrite.Range(key, end, ro) +} + +func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { + tw.deletes++ + return tw.TxnWrite.DeleteRange(key, end) +} + +func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw.puts++ + return tw.TxnWrite.Put(key, value, lease) +} + +func (tw *metricsTxnWrite) End() { + defer tw.TxnWrite.End() + if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { + if sum > 1 { + txnCounter.Inc() + } + return + } + switch { + case tw.ranges == 1: + rangeCounter.Inc() + case tw.puts == 1: + putCounter.Inc() + case tw.deletes == 1: + deleteCounter.Inc() + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go new file mode 100644 index 000000000..5fa35a1c2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/revision.go @@ -0,0 +1,67 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import "encoding/binary" + +// revBytesLen is the byte length of a normal revision. +// First 8 bytes is the revision.main in big-endian format. The 9th byte +// is a '_'. The last 8 bytes is the revision.sub in big-endian format. +const revBytesLen = 8 + 1 + 8 + +// A revision indicates modification of the key-value space. +// The set of changes that share same main revision changes the key-value space atomically. +type revision struct { + // main is the main revision of a set of changes that happen atomically. + main int64 + + // sub is the the sub revision of a change in a set of changes that happen + // atomically. Each change has different increasing sub revision in that + // set. 
+ sub int64 +} + +func (a revision) GreaterThan(b revision) bool { + if a.main > b.main { + return true + } + if a.main < b.main { + return false + } + return a.sub > b.sub +} + +func newRevBytes() []byte { + return make([]byte, revBytesLen, markedRevBytesLen) +} + +func revToBytes(rev revision, bytes []byte) { + binary.BigEndian.PutUint64(bytes, uint64(rev.main)) + bytes[8] = '_' + binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub)) +} + +func bytesToRev(bytes []byte) revision { + return revision{ + main: int64(binary.BigEndian.Uint64(bytes[0:8])), + sub: int64(binary.BigEndian.Uint64(bytes[9:])), + } +} + +type revisions []revision + +func (a revisions) Len() int { return len(a) } +func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) } +func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go new file mode 100644 index 000000000..8a0df0bfc --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/util.go @@ -0,0 +1,56 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "encoding/binary" + + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +func UpdateConsistentIndex(be backend.Backend, index uint64) { + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var oldi uint64 + _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) + if len(vs) != 0 { + oldi = binary.BigEndian.Uint64(vs[0]) + } + + if index <= oldi { + return + } + + bs := make([]byte, 8) + binary.BigEndian.PutUint64(bs, index) + tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) +} + +func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { + ibytes := newRevBytes() + revToBytes(revision{main: kv.ModRevision}, ibytes) + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + be.BatchTx().Lock() + be.BatchTx().UnsafePut(keyBucketName, ibytes, d) + be.BatchTx().Unlock() +} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go new file mode 100644 index 000000000..68d9ab71d --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go @@ -0,0 +1,522 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
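// A self-contained round-trip of the 17-byte revision encoding documented in
// revision.go above: 8 big-endian bytes of main, a '_' separator byte, then
// 8 big-endian bytes of sub. The helpers are duplicated here purely for
// illustration; the authoritative versions live in the vendored file.
package main

import (
	"encoding/binary"
	"fmt"
)

const revBytesLen = 8 + 1 + 8

type revision struct{ main, sub int64 }

func revToBytes(rev revision, b []byte) {
	binary.BigEndian.PutUint64(b, uint64(rev.main))
	b[8] = '_'
	binary.BigEndian.PutUint64(b[9:], uint64(rev.sub))
}

func bytesToRev(b []byte) revision {
	return revision{
		main: int64(binary.BigEndian.Uint64(b[0:8])),
		sub:  int64(binary.BigEndian.Uint64(b[9:])),
	}
}

func main() {
	b := make([]byte, revBytesLen)
	revToBytes(revision{main: 42, sub: 3}, b)
	fmt.Printf("encoded: % x\n", b)             // 00..2a 5f 00..03
	fmt.Printf("decoded: %+v\n", bytesToRev(b)) // {main:42 sub:3}
}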
+ +package mvcc + +import ( + "sync" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +const ( + // chanBufLen is the length of the buffered chan + // for sending out watched events. + // TODO: find a good buf value. 1024 is just a random one that + // seems to be reasonable. + chanBufLen = 1024 + + // maxWatchersPerSync is the number of watchers to sync in a single batch + maxWatchersPerSync = 512 +) + +type watchable interface { + watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) + progress(w *watcher) + rev() int64 +} + +type watchableStore struct { + *store + + // mu protects watcher groups and batches. It should never be locked + // before locking store.mu to avoid deadlock. + mu sync.RWMutex + + // victims are watcher batches that were blocked on the watch channel + victims []watcherBatch + victimc chan struct{} + + // contains all unsynced watchers that needs to sync with events that have happened + unsynced watcherGroup + + // contains all synced watchers that are in sync with the progress of the store. + // The key of the map is the key that the watcher watches on. + synced watcherGroup + + stopc chan struct{} + wg sync.WaitGroup +} + +// cancelFunc updates unsynced and synced maps when running +// cancel operations. +type cancelFunc func() + +func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV { + return newWatchableStore(b, le, ig) +} + +func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore { + s := &watchableStore{ + store: NewStore(b, le, ig), + victimc: make(chan struct{}, 1), + unsynced: newWatcherGroup(), + synced: newWatcherGroup(), + stopc: make(chan struct{}), + } + s.store.ReadView = &readView{s} + s.store.WriteView = &writeView{s} + if s.le != nil { + // use this store as the deleter so revokes trigger watch events + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) + } + s.wg.Add(2) + go s.syncWatchersLoop() + go s.syncVictimsLoop() + return s +} + +func (s *watchableStore) Close() error { + close(s.stopc) + s.wg.Wait() + return s.store.Close() +} + +func (s *watchableStore) NewWatchStream() WatchStream { + watchStreamGauge.Inc() + return &watchStream{ + watchable: s, + ch: make(chan WatchResponse, chanBufLen), + cancels: make(map[WatchID]cancelFunc), + watchers: make(map[WatchID]*watcher), + } +} + +func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { + wa := &watcher{ + key: key, + end: end, + minRev: startRev, + id: id, + ch: ch, + fcs: fcs, + } + + s.mu.Lock() + s.revMu.RLock() + synced := startRev > s.store.currentRev || startRev == 0 + if synced { + wa.minRev = s.store.currentRev + 1 + if startRev > wa.minRev { + wa.minRev = startRev + } + } + if synced { + s.synced.add(wa) + } else { + slowWatcherGauge.Inc() + s.unsynced.add(wa) + } + s.revMu.RUnlock() + s.mu.Unlock() + + watcherGauge.Inc() + + return wa, func() { s.cancelWatcher(wa) } +} + +// cancelWatcher removes references of the watcher from the watchableStore +func (s *watchableStore) cancelWatcher(wa *watcher) { + for { + s.mu.Lock() + + if s.unsynced.delete(wa) { + slowWatcherGauge.Dec() + break + } else if s.synced.delete(wa) { + break + } else if wa.compacted { + break + } + + if !wa.victim { + panic("watcher not victim but not in watch groups") + } + + var 
victimBatch watcherBatch + for _, wb := range s.victims { + if wb[wa] != nil { + victimBatch = wb + break + } + } + if victimBatch != nil { + slowWatcherGauge.Dec() + delete(victimBatch, wa) + break + } + + // victim being processed so not accessible; retry + s.mu.Unlock() + time.Sleep(time.Millisecond) + } + + watcherGauge.Dec() + s.mu.Unlock() +} + +func (s *watchableStore) Restore(b backend.Backend) error { + s.mu.Lock() + defer s.mu.Unlock() + err := s.store.Restore(b) + if err != nil { + return err + } + + for wa := range s.synced.watchers { + s.unsynced.watchers.add(wa) + } + s.synced = newWatcherGroup() + return nil +} + +// syncWatchersLoop syncs the watcher in the unsynced map every 100ms. +func (s *watchableStore) syncWatchersLoop() { + defer s.wg.Done() + + for { + s.mu.RLock() + st := time.Now() + lastUnsyncedWatchers := s.unsynced.size() + s.mu.RUnlock() + + unsyncedWatchers := 0 + if lastUnsyncedWatchers > 0 { + unsyncedWatchers = s.syncWatchers() + } + syncDuration := time.Since(st) + + waitDuration := 100 * time.Millisecond + // more work pending? + if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { + // be fair to other store operations by yielding time taken + waitDuration = syncDuration + } + + select { + case <-time.After(waitDuration): + case <-s.stopc: + return + } + } +} + +// syncVictimsLoop tries to write precomputed watcher responses to +// watchers that had a blocked watcher channel +func (s *watchableStore) syncVictimsLoop() { + defer s.wg.Done() + + for { + for s.moveVictims() != 0 { + // try to update all victim watchers + } + s.mu.RLock() + isEmpty := len(s.victims) == 0 + s.mu.RUnlock() + + var tickc <-chan time.Time + if !isEmpty { + tickc = time.After(10 * time.Millisecond) + } + + select { + case <-tickc: + case <-s.victimc: + case <-s.stopc: + return + } + } +} + +// moveVictims tries to update watches with already pending event data +func (s *watchableStore) moveVictims() (moved int) { + s.mu.Lock() + victims := s.victims + s.victims = nil + s.mu.Unlock() + + var newVictim watcherBatch + for _, wb := range victims { + // try to send responses again + for w, eb := range wb { + // watcher has observed the store up to, but not including, w.minRev + rev := w.minRev - 1 + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { + pendingEventsGauge.Add(float64(len(eb.evs))) + } else { + if newVictim == nil { + newVictim = make(watcherBatch) + } + newVictim[w] = eb + continue + } + moved++ + } + + // assign completed victim watchers to unsync/sync + s.mu.Lock() + s.store.revMu.RLock() + curRev := s.store.currentRev + for w, eb := range wb { + if newVictim != nil && newVictim[w] != nil { + // couldn't send watch response; stays victim + continue + } + w.victim = false + if eb.moreRev != 0 { + w.minRev = eb.moreRev + } + if w.minRev <= curRev { + s.unsynced.add(w) + } else { + slowWatcherGauge.Dec() + s.synced.add(w) + } + } + s.store.revMu.RUnlock() + s.mu.Unlock() + } + + if len(newVictim) > 0 { + s.mu.Lock() + s.victims = append(s.victims, newVictim) + s.mu.Unlock() + } + + return moved +} + +// syncWatchers syncs unsynced watchers by: +// 1. choose a set of watchers from the unsynced watcher group +// 2. iterate over the set to get the minimum revision and remove compacted watchers +// 3. use minimum revision to get all key-value pairs and send those events to watchers +// 4. 
remove synced watchers in set from unsynced group and move to synced group +func (s *watchableStore) syncWatchers() int { + s.mu.Lock() + defer s.mu.Unlock() + + if s.unsynced.size() == 0 { + return 0 + } + + s.store.revMu.RLock() + defer s.store.revMu.RUnlock() + + // in order to find key-value pairs from unsynced watchers, we need to + // find min revision index, and these revisions can be used to + // query the backend store of key-value pairs + curRev := s.store.currentRev + compactionRev := s.store.compactMainRev + + wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) + minBytes, maxBytes := newRevBytes(), newRevBytes() + revToBytes(revision{main: minRev}, minBytes) + revToBytes(revision{main: curRev + 1}, maxBytes) + + // UnsafeRange returns keys and values. And in boltdb, keys are revisions. + // values are actual key-value pairs in backend. + tx := s.store.b.ReadTx() + tx.Lock() + revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) + evs := kvsToEvents(wg, revs, vs) + tx.Unlock() + + var victims watcherBatch + wb := newWatcherBatch(wg, evs) + for w := range wg.watchers { + w.minRev = curRev + 1 + + eb, ok := wb[w] + if !ok { + // bring un-notified watcher to synced + s.synced.add(w) + s.unsynced.delete(w) + continue + } + + if eb.moreRev != 0 { + w.minRev = eb.moreRev + } + + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { + pendingEventsGauge.Add(float64(len(eb.evs))) + } else { + if victims == nil { + victims = make(watcherBatch) + } + w.victim = true + } + + if w.victim { + victims[w] = eb + } else { + if eb.moreRev != 0 { + // stay unsynced; more to read + continue + } + s.synced.add(w) + } + s.unsynced.delete(w) + } + s.addVictim(victims) + + vsz := 0 + for _, v := range s.victims { + vsz += len(v) + } + slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) + + return s.unsynced.size() +} + +// kvsToEvents gets all events for the watchers from all key-value pairs +func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { + for i, v := range vals { + var kv mvccpb.KeyValue + if err := kv.Unmarshal(v); err != nil { + plog.Panicf("cannot unmarshal event: %v", err) + } + + if !wg.contains(string(kv.Key)) { + continue + } + + ty := mvccpb.PUT + if isTombstone(revs[i]) { + ty = mvccpb.DELETE + // patch in mod revision so watchers won't skip + kv.ModRevision = bytesToRev(revs[i]).main + } + evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) + } + return evs +} + +// notify notifies the fact that given event at the given rev just happened to +// watchers that watch on the key of the event. 
+func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { + var victim watcherBatch + for w, eb := range newWatcherBatch(&s.synced, evs) { + if eb.revs != 1 { + plog.Panicf("unexpected multiple revisions in notification") + } + + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { + pendingEventsGauge.Add(float64(len(eb.evs))) + } else { + // move slow watcher to victims + w.minRev = rev + 1 + if victim == nil { + victim = make(watcherBatch) + } + w.victim = true + victim[w] = eb + s.synced.delete(w) + slowWatcherGauge.Inc() + } + } + s.addVictim(victim) +} + +func (s *watchableStore) addVictim(victim watcherBatch) { + if victim == nil { + return + } + s.victims = append(s.victims, victim) + select { + case s.victimc <- struct{}{}: + default: + } +} + +func (s *watchableStore) rev() int64 { return s.store.Rev() } + +func (s *watchableStore) progress(w *watcher) { + s.mu.RLock() + defer s.mu.RUnlock() + + if _, ok := s.synced.watchers[w]; ok { + w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) + // If the ch is full, this watcher is receiving events. + // We do not need to send progress at all. + } +} + +type watcher struct { + // the watcher key + key []byte + // end indicates the end of the range to watch. + // If end is set, the watcher is on a range. + end []byte + + // victim is set when ch is blocked and undergoing victim processing + victim bool + + // compacted is set when the watcher is removed because of compaction + compacted bool + + // minRev is the minimum revision update the watcher will accept + minRev int64 + id WatchID + + fcs []FilterFunc + // a chan to send out the watch response. + // The chan might be shared with other watchers. + ch chan<- WatchResponse +} + +func (w *watcher) send(wr WatchResponse) bool { + progressEvent := len(wr.Events) == 0 + + if len(w.fcs) != 0 { + ne := make([]mvccpb.Event, 0, len(wr.Events)) + for i := range wr.Events { + filtered := false + for _, filter := range w.fcs { + if filter(wr.Events[i]) { + filtered = true + break + } + } + if !filtered { + ne = append(ne, wr.Events[i]) + } + } + wr.Events = ne + } + + // if all events are filtered out, we should send nothing. + if !progressEvent && len(wr.Events) == 0 { + return true + } + select { + case w.ch <- wr: + return true + default: + return false + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go new file mode 100644 index 000000000..5c5bfda13 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "github.com/coreos/etcd/mvcc/mvccpb" +) + +func (tw *watchableStoreTxnWrite) End() { + changes := tw.Changes() + if len(changes) == 0 { + tw.TxnWrite.End() + return + } + + rev := tw.Rev() + 1 + evs := make([]mvccpb.Event, len(changes)) + for i, change := range changes { + evs[i].Kv = &changes[i] + if change.CreateRevision == 0 { + evs[i].Type = mvccpb.DELETE + evs[i].Kv.ModRevision = rev + } else { + evs[i].Type = mvccpb.PUT + } + } + + // end write txn under watchable store lock so the updates are visible + // when asynchronous event posting checks the current store revision + tw.s.mu.Lock() + tw.s.notify(rev, evs) + tw.TxnWrite.End() + tw.s.mu.Unlock() +} + +type watchableStoreTxnWrite struct { + TxnWrite + s *watchableStore +} + +func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go new file mode 100644 index 000000000..9468d4269 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watcher.go @@ -0,0 +1,171 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "bytes" + "errors" + "sync" + + "github.com/coreos/etcd/mvcc/mvccpb" +) + +var ( + ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") +) + +type WatchID int64 + +// FilterFunc returns true if the given event should be filtered out. +type FilterFunc func(e mvccpb.Event) bool + +type WatchStream interface { + // Watch creates a watcher. The watcher watches the events happening or + // happened on the given key or range [key, end) from the given startRev. + // + // The whole event history can be watched unless compacted. + // If `startRev` <=0, watch observes events after currentRev. + // + // The returned `id` is the ID of this watcher. It appears as WatchID + // in events that are sent to the created watcher through stream channel. + // + Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID + + // Chan returns a chan. All watch response will be sent to the returned chan. + Chan() <-chan WatchResponse + + // RequestProgress requests the progress of the watcher with given ID. The response + // will only be sent if the watcher is currently synced. + // The responses will be sent through the WatchRespone Chan attached + // with this stream to ensure correct ordering. + // The responses contains no events. The revision in the response is the progress + // of the watchers since the watcher is currently synced. + RequestProgress(id WatchID) + + // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be + // returned. + Cancel(id WatchID) error + + // Close closes Chan and release all related resources. + Close() + + // Rev returns the current revision of the KV the stream watches on. + Rev() int64 +} + +type WatchResponse struct { + // WatchID is the WatchID of the watcher this response sent to. 
+ WatchID WatchID + + // Events contains all the events that needs to send. + Events []mvccpb.Event + + // Revision is the revision of the KV when the watchResponse is created. + // For a normal response, the revision should be the same as the last + // modified revision inside Events. For a delayed response to a unsynced + // watcher, the revision is greater than the last modified revision + // inside Events. + Revision int64 + + // CompactRevision is set when the watcher is cancelled due to compaction. + CompactRevision int64 +} + +// watchStream contains a collection of watchers that share +// one streaming chan to send out watched events and other control events. +type watchStream struct { + watchable watchable + ch chan WatchResponse + + mu sync.Mutex // guards fields below it + // nextID is the ID pre-allocated for next new watcher in this stream + nextID WatchID + closed bool + cancels map[WatchID]cancelFunc + watchers map[WatchID]*watcher +} + +// Watch creates a new watcher in the stream and returns its WatchID. +// TODO: return error if ws is closed? +func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID { + // prevent wrong range where key >= end lexicographically + // watch request with 'WithFromKey' has empty-byte range end + if len(end) != 0 && bytes.Compare(key, end) != -1 { + return -1 + } + + ws.mu.Lock() + defer ws.mu.Unlock() + if ws.closed { + return -1 + } + + id := ws.nextID + ws.nextID++ + + w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...) + + ws.cancels[id] = c + ws.watchers[id] = w + return id +} + +func (ws *watchStream) Chan() <-chan WatchResponse { + return ws.ch +} + +func (ws *watchStream) Cancel(id WatchID) error { + ws.mu.Lock() + cancel, ok := ws.cancels[id] + ok = ok && !ws.closed + if ok { + delete(ws.cancels, id) + delete(ws.watchers, id) + } + ws.mu.Unlock() + if !ok { + return ErrWatcherNotExist + } + cancel() + return nil +} + +func (ws *watchStream) Close() { + ws.mu.Lock() + defer ws.mu.Unlock() + + for _, cancel := range ws.cancels { + cancel() + } + ws.closed = true + close(ws.ch) + watchStreamGauge.Dec() +} + +func (ws *watchStream) Rev() int64 { + ws.mu.Lock() + defer ws.mu.Unlock() + return ws.watchable.rev() +} + +func (ws *watchStream) RequestProgress(id WatchID) { + ws.mu.Lock() + w, ok := ws.watchers[id] + ws.mu.Unlock() + if !ok { + return + } + ws.watchable.progress(w) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go new file mode 100644 index 000000000..6ef1d0ce8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go @@ -0,0 +1,283 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "math" + + "github.com/coreos/etcd/mvcc/mvccpb" + "github.com/coreos/etcd/pkg/adt" +) + +var ( + // watchBatchMaxRevs is the maximum distinct revisions that + // may be sent to an unsynced watcher at a time. 
Declared as + // var instead of const for testing purposes. + watchBatchMaxRevs = 1000 +) + +type eventBatch struct { + // evs is a batch of revision-ordered events + evs []mvccpb.Event + // revs is the minimum unique revisions observed for this batch + revs int + // moreRev is first revision with more events following this batch + moreRev int64 +} + +func (eb *eventBatch) add(ev mvccpb.Event) { + if eb.revs > watchBatchMaxRevs { + // maxed out batch size + return + } + + if len(eb.evs) == 0 { + // base case + eb.revs = 1 + eb.evs = append(eb.evs, ev) + return + } + + // revision accounting + ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision + evRev := ev.Kv.ModRevision + if evRev > ebRev { + eb.revs++ + if eb.revs > watchBatchMaxRevs { + eb.moreRev = evRev + return + } + } + + eb.evs = append(eb.evs, ev) +} + +type watcherBatch map[*watcher]*eventBatch + +func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) { + eb := wb[w] + if eb == nil { + eb = &eventBatch{} + wb[w] = eb + } + eb.add(ev) +} + +// newWatcherBatch maps watchers to their matched events. It enables quick +// events look up by watcher. +func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch { + if len(wg.watchers) == 0 { + return nil + } + + wb := make(watcherBatch) + for _, ev := range evs { + for w := range wg.watcherSetByKey(string(ev.Kv.Key)) { + if ev.Kv.ModRevision >= w.minRev { + // don't double notify + wb.add(w, ev) + } + } + } + return wb +} + +type watcherSet map[*watcher]struct{} + +func (w watcherSet) add(wa *watcher) { + if _, ok := w[wa]; ok { + panic("add watcher twice!") + } + w[wa] = struct{}{} +} + +func (w watcherSet) union(ws watcherSet) { + for wa := range ws { + w.add(wa) + } +} + +func (w watcherSet) delete(wa *watcher) { + if _, ok := w[wa]; !ok { + panic("removing missing watcher!") + } + delete(w, wa) +} + +type watcherSetByKey map[string]watcherSet + +func (w watcherSetByKey) add(wa *watcher) { + set := w[string(wa.key)] + if set == nil { + set = make(watcherSet) + w[string(wa.key)] = set + } + set.add(wa) +} + +func (w watcherSetByKey) delete(wa *watcher) bool { + k := string(wa.key) + if v, ok := w[k]; ok { + if _, ok := v[wa]; ok { + delete(v, wa) + if len(v) == 0 { + // remove the set; nothing left + delete(w, k) + } + return true + } + } + return false +} + +// watcherGroup is a collection of watchers organized by their ranges +type watcherGroup struct { + // keyWatchers has the watchers that watch on a single key + keyWatchers watcherSetByKey + // ranges has the watchers that watch a range; it is sorted by interval + ranges adt.IntervalTree + // watchers is the set of all watchers + watchers watcherSet +} + +func newWatcherGroup() watcherGroup { + return watcherGroup{ + keyWatchers: make(watcherSetByKey), + watchers: make(watcherSet), + } +} + +// add puts a watcher in the group. +func (wg *watcherGroup) add(wa *watcher) { + wg.watchers.add(wa) + if wa.end == nil { + wg.keyWatchers.add(wa) + return + } + + // interval already registered? + ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) + if iv := wg.ranges.Find(ivl); iv != nil { + iv.Val.(watcherSet).add(wa) + return + } + + // not registered, put in interval tree + ws := make(watcherSet) + ws.add(wa) + wg.ranges.Insert(ivl, ws) +} + +// contains is whether the given key has a watcher in the group. 
+func (wg *watcherGroup) contains(key string) bool { + _, ok := wg.keyWatchers[key] + return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) +} + +// size gives the number of unique watchers in the group. +func (wg *watcherGroup) size() int { return len(wg.watchers) } + +// delete removes a watcher from the group. +func (wg *watcherGroup) delete(wa *watcher) bool { + if _, ok := wg.watchers[wa]; !ok { + return false + } + wg.watchers.delete(wa) + if wa.end == nil { + wg.keyWatchers.delete(wa) + return true + } + + ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) + iv := wg.ranges.Find(ivl) + if iv == nil { + return false + } + + ws := iv.Val.(watcherSet) + delete(ws, wa) + if len(ws) == 0 { + // remove interval missing watchers + if ok := wg.ranges.Delete(ivl); !ok { + panic("could not remove watcher from interval tree") + } + } + + return true +} + +// choose selects watchers from the watcher group to update +func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) { + if len(wg.watchers) < maxWatchers { + return wg, wg.chooseAll(curRev, compactRev) + } + ret := newWatcherGroup() + for w := range wg.watchers { + if maxWatchers <= 0 { + break + } + maxWatchers-- + ret.add(w) + } + return &ret, ret.chooseAll(curRev, compactRev) +} + +func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 { + minRev := int64(math.MaxInt64) + for w := range wg.watchers { + if w.minRev > curRev { + panic("watcher current revision should not exceed current revision") + } + if w.minRev < compactRev { + select { + case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}: + w.compacted = true + wg.delete(w) + default: + // retry next time + } + continue + } + if minRev > w.minRev { + minRev = w.minRev + } + } + return minRev +} + +// watcherSetByKey gets the set of watchers that receive events on the given key. +func (wg *watcherGroup) watcherSetByKey(key string) watcherSet { + wkeys := wg.keyWatchers[key] + wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key)) + + // zero-copy cases + switch { + case len(wranges) == 0: + // no need to merge ranges or copy; reuse single-key set + return wkeys + case len(wranges) == 0 && len(wkeys) == 0: + return nil + case len(wranges) == 1 && len(wkeys) == 0: + return wranges[0].Val.(watcherSet) + } + + // copy case + ret := make(watcherSet) + ret.union(wg.keyWatchers[key]) + for _, item := range wranges { + ret.union(item.Val.(watcherSet)) + } + return ret +} diff --git a/vendor/github.com/docker/cli/cli/cobra.go b/vendor/github.com/docker/cli/cli/cobra.go new file mode 100644 index 000000000..114f1b7d6 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/cobra.go @@ -0,0 +1,150 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. 
+func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/cli/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. 
+{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go new file mode 100644 index 000000000..59efa9ac2 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/cli.go @@ -0,0 +1,305 @@ +package command + +import ( + "fmt" + "io" + "net/http" + "os" + "runtime" + + "github.com/docker/cli/cli" + cliconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + cliflags "github.com/docker/cli/cli/flags" + dopts "github.com/docker/cli/opts" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/notary/passphrase" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *InStream + Out() *OutStream + Err() io.Writer +} + +// Cli represents the docker command line client. +type Cli interface { + Client() client.APIClient + Out() *OutStream + Err() io.Writer + In() *InStream + SetIn(in *InStream) + ConfigFile() *configfile.ConfigFile + CredentialsStore(serverAddress string) credentials.Store +} + +// DockerCli is an instance the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + in *InStream + out *OutStream + err io.Writer + client client.APIClient + defaultVersion string + server ServerInfo +} + +// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. +func (cli *DockerCli) DefaultVersion() string { + return cli.defaultVersion +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *OutStream { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// SetIn sets the reader used for stdin +func (cli *DockerCli) SetIn(in *InStream) { + cli.in = in +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *InStream { + return cli.in +} + +// ShowHelp shows the command help. +func ShowHelp(err io.Writer) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + cmd.SetOutput(err) + cmd.HelpFunc()(cmd, args) + return nil + } +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + return cli.configFile +} + +// ServerInfo returns the server version details for the host this client is +// connected to +func (cli *DockerCli) ServerInfo() ServerInfo { + return cli.server +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. 
+func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + for registry := range cli.configFile.CredentialHelpers { + helper := cli.CredentialsStore(registry) + newAuths, err := helper.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + } + defaultStore := cli.CredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + return auths, nil +} + +func addAll(to, from map[string]types.AuthConfig) { + for reg, ac := range from { + to[reg] = ac + } +} + +// CredentialsStore returns a new credentials store based +// on the settings provided in the configuration file. Empty string returns +// the default credential store. +func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { + if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { + return credentials.NewNativeStore(cli.configFile, helper) + } + return credentials.NewFileStore(cli.configFile) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { + if c.CredentialHelpers != nil && serverAddress != "" { + if helper, exists := c.CredentialHelpers[serverAddress]; exists { + return helper + } + } + return c.CredentialsStore +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. +func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { + cli.configFile = LoadDefaultConfigFile(cli.err) + + var err error + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + if tlsconfig.IsErrEncryptedKey(err) { + var ( + passwd string + giveup bool + ) + passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil) + + for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ { + // some code and comments borrowed from notary/trustmanager/keystore.go + passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts) + // Check if the passphrase retriever got an error or if it is telling us to give up + if giveup || err != nil { + return errors.Wrap(err, "private key is encrypted, but could not get passphrase") + } + + opts.Common.TLSOptions.Passphrase = passwd + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + } + } + + if err != nil { + return err + } + + cli.defaultVersion = cli.client.ClientVersion() + + if ping, err := cli.client.Ping(context.Background()); err == nil { + cli.server = ServerInfo{ + HasExperimental: ping.Experimental, + OSType: ping.OSType, + } + + // since the new header was added in 1.25, assume server is 1.24 if header is not present. + if ping.APIVersion == "" { + ping.APIVersion = "1.24" + } + + // if server version is lower than the current cli, downgrade + if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) { + cli.client.UpdateClientVersion(ping.APIVersion) + } + } + + return nil +} + +// ServerInfo stores details about the supported features and platform of the +// server +type ServerInfo struct { + HasExperimental bool + OSType string +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. 
+func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { + return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { + configFile, e := cliconfig.Load(cliconfig.Dir()) + if e != nil { + fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + return configFile +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + customHeaders := configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = UserAgent() + + verStr := api.DefaultVersion + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + return client.NewClient(host, verStr, httpClient, customHeaders) +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = dopts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. 
+ return nil, nil + } + opts := *tlsOptions + opts.ExclusiveRootPools = true + config, err := tlsconfig.Client(opts) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + CheckRedirect: client.CheckRedirect, + }, nil +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/docker/cli/cli/command/events_utils.go b/vendor/github.com/docker/cli/cli/command/events_utils.go new file mode 100644 index 000000000..c271b2050 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/events_utils.go @@ -0,0 +1,47 @@ +package command + +import ( + "sync" + + "github.com/Sirupsen/logrus" + eventtypes "github.com/docker/docker/api/types/events" +) + +// EventHandler is abstract interface for user to customize +// own handle functions of each type of events +type EventHandler interface { + Handle(action string, h func(eventtypes.Message)) + Watch(c <-chan eventtypes.Message) +} + +// InitEventHandler initializes and returns an EventHandler +func InitEventHandler() EventHandler { + return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan. 
+func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/cli/cli/command/image/build.go b/vendor/github.com/docker/cli/cli/command/image/build.go new file mode 100644 index 000000000..da28e898e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/build.go @@ -0,0 +1,500 @@ +package image + +import ( + "archive/tar" + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/image/build" + "github.com/docker/cli/opts" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + runconfigopts "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type buildOptions struct { + context string + dockerfileName string + tags opts.ListOpts + labels opts.ListOpts + buildArgs opts.ListOpts + extraHosts opts.ListOpts + ulimits *opts.UlimitOpt + memory opts.MemBytes + memorySwap opts.MemSwapBytes + shmSize opts.MemBytes + cpuShares int64 + cpuPeriod int64 + cpuQuota int64 + cpuSetCpus string + cpuSetMems string + cgroupParent string + isolation string + quiet bool + noCache bool + rm bool + forceRm bool + pull bool + cacheFrom []string + compress bool + securityOpt []string + networkMode string + squash bool + target string + imageIDFile string +} + +// dockerfileFromStdin returns true when the user specified that the Dockerfile +// should be read from stdin instead of a file +func (o buildOptions) dockerfileFromStdin() bool { + return o.dockerfileName == "-" +} + +// contextFromStdin returns true when the user specified that the build context +// should be read from stdin +func (o buildOptions) contextFromStdin() bool { + return o.context == "-" +} + +// NewBuildCommand creates a new `docker build` command +func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command { + ulimits := make(map[string]*units.Ulimit) + options := buildOptions{ + tags: opts.NewListOpts(validateTag), + buildArgs: opts.NewListOpts(opts.ValidateEnv), + ulimits: opts.NewUlimitOpt(&ulimits), + labels: opts.NewListOpts(opts.ValidateEnv), + extraHosts: opts.NewListOpts(opts.ValidateExtraHost), + } + + cmd := &cobra.Command{ + Use: "build [OPTIONS] PATH | URL | -", + Short: "Build an image from a Dockerfile", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.context = args[0] + return runBuild(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") + flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") + flags.Var(options.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flags.VarP(&options.memory, "memory", "m", "Memory limit") + 
flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") + flags.Var(&options.labels, "label", "Set metadata for an image") + flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") + flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") + flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") + flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") + flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") + flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") + flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") + flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") + flags.SetAnnotation("network", "version", []string{"1.25"}) + flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.StringVar(&options.target, "target", "", "Set the target build stage to build.") + flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file") + + command.AddTrustVerificationFlags(flags) + + flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") + flags.SetAnnotation("squash", "experimental", nil) + flags.SetAnnotation("squash", "version", []string{"1.25"}) + + return cmd +} + +// lastProgressOutput is the same as progress.Output except +// that it only output with the last update. It is used in +// non terminal scenarios to suppress verbose messages +type lastProgressOutput struct { + output progress.Output +} + +// WriteProgress formats progress information from a ProgressReader. 
+func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { + if !prog.LastUpdate { + return nil + } + + return out.output.WriteProgress(prog) +} + +// nolint: gocyclo +func runBuild(dockerCli *command.DockerCli, options buildOptions) error { + var ( + buildCtx io.ReadCloser + dockerfileCtx io.ReadCloser + err error + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + if options.dockerfileFromStdin() { + if options.contextFromStdin() { + return errors.New("invalid argument: can't use stdin for both build context and dockerfile") + } + dockerfileCtx = dockerCli.In() + } + + specifiedContext := options.context + progBuff = dockerCli.Out() + buildBuff = dockerCli.Out() + if options.quiet { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + if options.imageIDFile != "" { + // Avoid leaving a stale file if we eventually fail + if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "Removing image ID file") + } + } + + switch { + case options.contextFromStdin(): + buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName) + case isLocalDir(specifiedContext): + contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName) + case urlutil.IsURL(specifiedContext): + buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) + default: + return errors.Errorf("unable to prepare context: path %q not found", specifiedContext) + } + + if err != nil { + if options.quiet && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(dockerCli.Err(), progBuff) + } + return errors.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + if buildCtx == nil { + excludes, err := build.ReadDockerignore(contextDir) + if err != nil { + return err + } + + if err := build.ValidateContextDirectory(contextDir, excludes); err != nil { + return errors.Errorf("error checking context: '%s'.", err) + } + + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin()) + + compression := archive.Uncompressed + if options.compress { + compression = archive.Gzip + } + buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: compression, + ExcludePatterns: excludes, + }) + if err != nil { + return err + } + } + + // replace Dockerfile if added dynamically + if dockerfileCtx != nil { + buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx) + if err != nil { + return err + } + } + + ctx := context.Background() + + var resolvedTags []*resolvedTag + if command.IsTrusted() { + translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { + return TrustedReference(ctx, dockerCli, ref, nil) + } + // Wrap the tar archive to replace the Dockerfile entry with the rewritten + // Dockerfile which uses trusted pulls. 
+ buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags) + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewProgressOutput(progBuff) + if !dockerCli.Out().IsTerminal() { + progressOutput = &lastProgressOutput{output: progressOutput} + } + + var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") + + authConfigs, _ := dockerCli.GetAllCredentials() + buildOptions := types.ImageBuildOptions{ + Memory: options.memory.Value(), + MemorySwap: options.memorySwap.Value(), + Tags: options.tags.GetAll(), + SuppressOutput: options.quiet, + NoCache: options.noCache, + Remove: options.rm, + ForceRemove: options.forceRm, + PullParent: options.pull, + Isolation: container.Isolation(options.isolation), + CPUSetCPUs: options.cpuSetCpus, + CPUSetMems: options.cpuSetMems, + CPUShares: options.cpuShares, + CPUQuota: options.cpuQuota, + CPUPeriod: options.cpuPeriod, + CgroupParent: options.cgroupParent, + Dockerfile: relDockerfile, + ShmSize: options.shmSize.Value(), + Ulimits: options.ulimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()), + AuthConfigs: authConfigs, + Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), + CacheFrom: options.cacheFrom, + SecurityOpt: options.securityOpt, + NetworkMode: options.networkMode, + Squash: options.squash, + ExtraHosts: options.extraHosts.GetAll(), + Target: options.target, + } + + response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) + if err != nil { + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s", progBuff) + } + return err + } + defer response.Body.Close() + + imageID := "" + aux := func(auxJSON *json.RawMessage) { + var result types.BuildResult + if err := json.Unmarshal(*auxJSON, &result); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err) + } else { + imageID = result.ID + } + } + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + return err + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { + fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+ + "image from Windows against a non-Windows Docker host. All files and "+ + "directories added to build context will have '-rwxr-xr-x' permissions. "+ + "It is recommended to double check and reset permissions for sensitive "+ + "files and directories.") + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if options.quiet { + imageID = fmt.Sprintf("%s", buildBuff) + fmt.Fprintf(dockerCli.Out(), imageID) + } + + if options.imageIDFile != "" { + if imageID == "" { + return errors.Errorf("Server did not provide an image ID. 
Cannot write %s", options.imageIDFile) + } + if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil { + return err + } + } + if command.IsTrusted() { + // Since the build was successful, now we must tag any of the resolved + // images from the above Dockerfile rewrite. + for _, resolved := range resolvedTags { + if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { + return err + } + } + } + + return nil +} + +func isLocalDir(c string) bool { + _, err := os.Stat(c) + return err == nil +} + +type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) + +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNormalizedNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in +// "FROM " instructions to a digest reference. `translator` is a +// function that takes a repository name and tag reference and returns a +// trusted digest reference. +func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { + scanner := bufio.NewScanner(dockerfile) + buf := bytes.NewBuffer(nil) + + // Scan the lines of the Dockerfile, looking for a "FROM" line. + for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != api.NoBaseImageSpecifier { + // Replace the line with a resolved "FROM repo@digest" + var ref reference.Named + ref, err = reference.ParseNormalizedNamed(matches[1]) + if err != nil { + return nil, nil, err + } + ref = reference.TagNameOnly(ref) + if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { + trustedRef, err := translator(ctx, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef))) + resolvedTags = append(resolvedTags, &resolvedTag{ + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + _, err := fmt.Fprintln(buf, line) + if err != nil { + return nil, nil, err + } + } + + return buf.Bytes(), resolvedTags, scanner.Err() +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. 
+ tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + content := io.Reader(tarReader) + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. + var newDockerfile []byte + newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) + if err != nil { + pipeWriter.CloseWithError(err) + return + } + hdr.Size = int64(len(newDockerfile)) + content = bytes.NewBuffer(newDockerfile) + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/vendor/github.com/docker/cli/cli/command/image/cmd.go b/vendor/github.com/docker/cli/cli/command/image/cmd.go new file mode 100644 index 000000000..10357fcfd --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/cmd.go @@ -0,0 +1,34 @@ +package image + +import ( + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" +) + +// NewImageCommand returns a cobra command for `image` subcommands +// nolint: interfacer +func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "image", + Short: "Manage images", + Args: cli.NoArgs, + RunE: command.ShowHelp(dockerCli.Err()), + } + cmd.AddCommand( + NewBuildCommand(dockerCli), + NewHistoryCommand(dockerCli), + NewImportCommand(dockerCli), + NewLoadCommand(dockerCli), + NewPullCommand(dockerCli), + NewPushCommand(dockerCli), + NewSaveCommand(dockerCli), + NewTagCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/cli/cli/command/image/history.go b/vendor/github.com/docker/cli/cli/command/image/history.go new file mode 100644 index 000000000..27782d107 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/history.go @@ -0,0 +1,64 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/spf13/cobra" +) + +type historyOptions struct { + image string + + human bool + quiet bool + noTrunc bool + format string +} + +// NewHistoryCommand creates a new `docker history` command +func NewHistoryCommand(dockerCli command.Cli) *cobra.Command { + var opts historyOptions + + cmd := &cobra.Command{ + Use: "history [OPTIONS] IMAGE", + Short: "Show the history of an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + return runHistory(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + + return cmd +} + +func runHistory(dockerCli command.Cli, opts historyOptions) error { + ctx := context.Background() + + history, err := dockerCli.Client().ImageHistory(ctx, opts.image) + if err != nil { + return err + } + + format := 
opts.format + if len(format) == 0 { + format = formatter.TableFormatKey + } + + historyCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewHistoryFormat(format, opts.quiet, opts.human), + Trunc: !opts.noTrunc, + } + return formatter.HistoryWrite(historyCtx, opts.human, history) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/import.go b/vendor/github.com/docker/cli/cli/command/image/import.go new file mode 100644 index 000000000..1f7189a95 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/import.go @@ -0,0 +1,87 @@ +package image + +import ( + "io" + "os" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + dockeropts "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/urlutil" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type importOptions struct { + source string + reference string + changes dockeropts.ListOpts + message string +} + +// NewImportCommand creates a new `docker import` command +func NewImportCommand(dockerCli command.Cli) *cobra.Command { + var options importOptions + + cmd := &cobra.Command{ + Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", + Short: "Import the contents from a tarball to create a filesystem image", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.source = args[0] + if len(args) > 1 { + options.reference = args[1] + } + return runImport(dockerCli, options) + }, + } + + flags := cmd.Flags() + + options.changes = dockeropts.NewListOpts(nil) + flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image") + flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image") + + return cmd +} + +func runImport(dockerCli command.Cli, options importOptions) error { + var ( + in io.Reader + srcName = options.source + ) + + if options.source == "-" { + in = dockerCli.In() + } else if !urlutil.IsURL(options.source) { + srcName = "-" + file, err := os.Open(options.source) + if err != nil { + return err + } + defer file.Close() + in = file + } + + source := types.ImageImportSource{ + Source: in, + SourceName: srcName, + } + + importOptions := types.ImageImportOptions{ + Message: options.message, + Changes: options.changes.GetAll(), + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ImageImport(context.Background(), source, options.reference, importOptions) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/inspect.go b/vendor/github.com/docker/cli/cli/command/image/inspect.go new file mode 100644 index 000000000..a510e3076 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/inspect.go @@ -0,0 +1,44 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker image inspect` +func newInspectCommand(dockerCli command.Cli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] IMAGE [IMAGE...]", + Short: "Display detailed information on one or more images", + Args: 
cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli command.Cli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ImageInspectWithRaw(ctx, ref) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/list.go b/vendor/github.com/docker/cli/cli/command/image/list.go new file mode 100644 index 000000000..6dada8252 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/list.go @@ -0,0 +1,95 @@ +package image + +import ( + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/command/formatter" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type imagesOptions struct { + matchName string + + quiet bool + all bool + noTrunc bool + showDigests bool + format string + filter opts.FilterOpt +} + +// NewImagesCommand creates a new `docker images` command +func NewImagesCommand(dockerCli command.Cli) *cobra.Command { + options := imagesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "images [OPTIONS] [REPOSITORY[:TAG]]", + Short: "List images", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + options.matchName = args[0] + } + return runImages(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVarP(&options.all, "all", "a", false, "Show all images (default hides intermediate images)") + flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVar(&options.showDigests, "digests", false, "Show digests") + flags.StringVar(&options.format, "format", "", "Pretty-print images using a Go template") + flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli command.Cli) *cobra.Command { + cmd := *NewImagesCommand(dockerCli) + cmd.Aliases = []string{"images", "list"} + cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" + return &cmd +} + +func runImages(dockerCli command.Cli, options imagesOptions) error { + ctx := context.Background() + + filters := options.filter.Value() + if options.matchName != "" { + filters.Add("reference", options.matchName) + } + + listOptions := types.ImageListOptions{ + All: options.all, + Filters: filters, + } + + images, err := dockerCli.Client().ImageList(ctx, listOptions) + if err != nil { + return err + } + + format := options.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !options.quiet { + format = dockerCli.ConfigFile().ImagesFormat + } else { + format = formatter.TableFormatKey + } + } + + imageCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewImageFormat(format, options.quiet, options.showDigests), + Trunc: !options.noTrunc, + }, + Digest: options.showDigests, + } + return formatter.ImageWrite(imageCtx, images) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/load.go 
b/vendor/github.com/docker/cli/cli/command/image/load.go new file mode 100644 index 000000000..6708599fd --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/load.go @@ -0,0 +1,77 @@ +package image + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type loadOptions struct { + input string + quiet bool +} + +// NewLoadCommand creates a new `docker load` command +func NewLoadCommand(dockerCli command.Cli) *cobra.Command { + var opts loadOptions + + cmd := &cobra.Command{ + Use: "load [OPTIONS]", + Short: "Load an image from a tar archive or STDIN", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLoad(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") + + return cmd +} + +func runLoad(dockerCli command.Cli, opts loadOptions) error { + + var input io.Reader = dockerCli.In() + if opts.input != "" { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(opts.input) + if err != nil { + return err + } + defer file.Close() + input = file + } + + // To avoid getting stuck, verify that a tar file is given either in + // the input flag or through stdin and if not display an error message and exit. + if opts.input == "" && dockerCli.In().IsTerminal() { + return errors.Errorf("requested load from stdin, but stdin is empty") + } + + if !dockerCli.Out().IsTerminal() { + opts.quiet = true + } + response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) + } + + _, err = io.Copy(dockerCli.Out(), response.Body) + return err +} diff --git a/vendor/github.com/docker/cli/cli/command/image/prune.go b/vendor/github.com/docker/cli/cli/command/image/prune.go new file mode 100644 index 000000000..8e521b61b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/prune.go @@ -0,0 +1,95 @@ +package image + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/opts" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool + filter opts.FilterOpt +} + +// NewPruneCommand returns a new cobra prune command for images +func NewPruneCommand(dockerCli command.Cli) *cobra.Command { + options := pruneOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused images", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, options) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + 
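// Illustrative sketch (not from the vendored sources): how the exported
// constructors in this package (NewImagesCommand, NewLoadCommand,
// NewPruneCommand, ...) are typically attached to a parent cobra command.
// The parent command shown here is an assumption for illustration; in this
// vendor tree the real wiring lives in cli/command/image/cmd.go.
package imagecmds

import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/image"
	"github.com/spf13/cobra"
)

// newImageCommand groups the image subcommands under `image`.
func newImageCommand(dockerCli command.Cli) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "image",
		Short: "Manage images",
	}
	cmd.AddCommand(
		image.NewImagesCommand(dockerCli),
		image.NewLoadCommand(dockerCli),
		image.NewPruneCommand(dockerCli),
		image.NewPullCommand(dockerCli),
		image.NewPushCommand(dockerCli),
	)
	return cmd
}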
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones") + flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=')") + + return cmd +} + +const ( + allImageWarning = `WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue?` + danglingWarning = `WARNING! This will remove all dangling images. +Are you sure you want to continue?` +) + +func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := options.filter.Value() + pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all)) + pruneFilters = command.PruneFilters(dockerCli, pruneFilters) + + warning := danglingWarning + if options.all { + warning = allImageWarning + } + if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) + if err != nil { + return + } + + if len(report.ImagesDeleted) > 0 { + output = "Deleted Images:\n" + for _, st := range report.ImagesDeleted { + if st.Untagged != "" { + output += fmt.Sprintln("untagged:", st.Untagged) + } else { + output += fmt.Sprintln("deleted:", st.Deleted) + } + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Image Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter}) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/pull.go b/vendor/github.com/docker/cli/cli/command/image/pull.go new file mode 100644 index 000000000..e60e5a434 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/pull.go @@ -0,0 +1,85 @@ +package image + +import ( + "fmt" + "strings" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type pullOptions struct { + remote string + all bool +} + +// NewPullCommand creates a new `docker pull` command +func NewPullCommand(dockerCli command.Cli) *cobra.Command { + var opts pullOptions + + cmd := &cobra.Command{ + Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", + Short: "Pull an image or a repository from a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runPull(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") + command.AddTrustVerificationFlags(flags) + + return cmd +} + +func runPull(dockerCli command.Cli, opts pullOptions) error { + distributionRef, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return err + } + if opts.all && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !opts.all && reference.IsNameOnly(distributionRef) { + distributionRef = reference.TagNameOnly(distributionRef) + if tagged, ok := distributionRef.(reference.Tagged); ok { + fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", tagged.Tag()) + } + } + + // Resolve the 
Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull") + + // Check if reference has a digest + _, isCanonical := distributionRef.(reference.Canonical) + if command.IsTrusted() && !isCanonical { + err = trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege) + } else { + err = imagePullPrivileged(ctx, dockerCli, authConfig, reference.FamiliarString(distributionRef), requestPrivilege, opts.all) + } + if err != nil { + if strings.Contains(err.Error(), "when fetching 'plugin'") { + return errors.New(err.Error() + " - Use `docker plugin install`") + } + return err + } + + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/image/push.go b/vendor/github.com/docker/cli/cli/command/image/push.go new file mode 100644 index 000000000..cc95897bd --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/push.go @@ -0,0 +1,61 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewPushCommand creates a new `docker push` command +func NewPushCommand(dockerCli command.Cli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] NAME[:TAG]", + Short: "Push an image or a repository to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustSigningFlags(flags) + + return cmd +} + +func runPush(dockerCli command.Cli, remote string) error { + ref, err := reference.ParseNormalizedNamed(remote) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") + + if command.IsTrusted() { + return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) + } + + responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/remove.go b/vendor/github.com/docker/cli/cli/command/image/remove.go new file mode 100644 index 000000000..91bf2f878 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/remove.go @@ -0,0 +1,78 @@ +package image + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + noPrune bool +} + +// NewRemoveCommand creates a new `docker remove` command +func NewRemoveCommand(dockerCli command.Cli) *cobra.Command { + var opts 
removeOptions + + cmd := &cobra.Command{ + Use: "rmi [OPTIONS] IMAGE [IMAGE...]", + Short: "Remove one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") + flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") + + return cmd +} + +func newRemoveCommand(dockerCli command.Cli) *cobra.Command { + cmd := *NewRemoveCommand(dockerCli) + cmd.Aliases = []string{"rmi", "remove"} + cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" + return &cmd +} + +func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error { + client := dockerCli.Client() + ctx := context.Background() + + options := types.ImageRemoveOptions{ + Force: opts.force, + PruneChildren: !opts.noPrune, + } + + var errs []string + for _, image := range images { + dels, err := client.ImageRemove(ctx, image, options) + if err != nil { + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) + } + } + } + } + + if len(errs) > 0 { + return errors.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/command/image/save.go b/vendor/github.com/docker/cli/cli/command/image/save.go new file mode 100644 index 000000000..ba666d274 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/save.go @@ -0,0 +1,56 @@ +package image + +import ( + "io" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type saveOptions struct { + images []string + output string +} + +// NewSaveCommand creates a new `docker save` command +func NewSaveCommand(dockerCli command.Cli) *cobra.Command { + var opts saveOptions + + cmd := &cobra.Command{ + Use: "save [OPTIONS] IMAGE [IMAGE...]", + Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.images = args + return runSave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runSave(dockerCli command.Cli, opts saveOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("cowardly refusing to save to a terminal. 
Use the -o flag or redirect") + } + + responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/tag.go b/vendor/github.com/docker/cli/cli/command/image/tag.go new file mode 100644 index 000000000..2a50c127c --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/tag.go @@ -0,0 +1,41 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/spf13/cobra" +) + +type tagOptions struct { + image string + name string +} + +// NewTagCommand creates a new `docker tag` command +func NewTagCommand(dockerCli command.Cli) *cobra.Command { + var opts tagOptions + + cmd := &cobra.Command{ + Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", + Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + opts.name = args[1] + return runTag(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTag(dockerCli command.Cli, opts tagOptions) error { + ctx := context.Background() + + return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) +} diff --git a/vendor/github.com/docker/cli/cli/command/image/trust.go b/vendor/github.com/docker/cli/cli/command/image/trust.go new file mode 100644 index 000000000..dd0d12c4c --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/image/trust.go @@ -0,0 +1,384 @@ +package image + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "io" + "path" + "sort" + + "github.com/Sirupsen/logrus" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/trust" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/registry" + "github.com/docker/notary/client" + "github.com/docker/notary/tuf/data" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type target struct { + name string + digest digest.Digest + size int64 +} + +// trustedPush handles content trust pushing of an image +func trustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) +} + +// PushTrustedReference pushes a canonical reference to the trust server. +// nolint: gocyclo +func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { + // If it is a trusted push we would like to find the target entry which match the + // tag provided in the function and then do an AddTarget later. + target := &client.Target{} + // Count the times of calling for handleTarget, + // if it is called more that once, that should be considered an error in a trusted push. 
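// Illustrative sketch (not from the vendored sources): the reference
// normalization used by the pull and push commands above, and by the
// tag/digest type switch below. The image name is a placeholder.
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A bare name carries neither a tag nor a digest yet.
	ref, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.IsNameOnly(ref)) // true

	// TagNameOnly applies the default "latest" tag, mirroring runPull above.
	ref = reference.TagNameOnly(ref)
	if tagged, ok := ref.(reference.NamedTagged); ok {
		fmt.Println(tagged.Tag()) // latest
	}

	// FamiliarString renders the short, user-facing form.
	fmt.Println(reference.FamiliarString(ref)) // ubuntu:latest
	fmt.Println(ref.Name())                    // docker.io/library/ubuntu
}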
+ cnt := 0 + handleTarget := func(aux *json.RawMessage) { + cnt++ + if cnt > 1 { + // handleTarget should only be called one. This will be treated as an error. + return + } + + var pushResult types.PushResult + err := json.Unmarshal(*aux, &pushResult) + if err == nil && pushResult.Tag != "" { + if dgst, err := digest.Parse(pushResult.Digest); err == nil { + h, err := hex.DecodeString(dgst.Hex()) + if err != nil { + target = nil + return + } + target.Name = pushResult.Tag + target.Hashes = data.Hashes{string(dgst.Algorithm()): h} + target.Length = int64(pushResult.Size) + } + } + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + default: + // We want trust signatures to always take an explicit tag, + // otherwise it will act as an untrusted push. + if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil { + return err + } + fmt.Fprintln(streams.Out(), "No tag specified, skipping trust metadata push") + return nil + } + + if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil { + return err + } + + if cnt > 1 { + return errors.Errorf("internal error: only one call to handleTarget expected") + } + + if target == nil { + fmt.Fprintln(streams.Out(), "No targets found, please provide a specific tag in order to sign it") + return nil + } + + fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata") + + repo, err := trust.GetNotaryRepository(streams, repoInfo, authConfig, "push", "pull") + if err != nil { + fmt.Fprintf(streams.Out(), "Error establishing connection to notary repository: %s\n", err) + return err + } + + // get the latest repository metadata so we can figure out which roles to sign + err = repo.Update(false) + + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + // Initialize the notary repository with a remotely managed snapshot key + if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return trust.NotaryError(repoInfo.Name.Name(), err) + } + fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name()) + err = repo.AddTarget(target, data.CanonicalTargetsRole) + case nil: + // already initialized and we have successfully downloaded the latest metadata + err = addTargetToAllSignableRoles(repo, target) + default: + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + if err == nil { + err = repo.Publish() + } + + if err != nil { + fmt.Fprintf(streams.Out(), "Failed to sign %q:%s - %s\n", repoInfo.Name.Name(), tag, err.Error()) + return trust.NotaryError(repoInfo.Name.Name(), err) + } + + fmt.Fprintf(streams.Out(), "Successfully signed %q:%s\n", repoInfo.Name.Name(), tag) + return nil +} + +// Attempt to add the image target to all the top level delegation roles we can +// (based on whether we have the signing key and whether the role's path allows +// us to). +// If there are no delegation roles, we add to the targets role. 
+func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { + var signableRoles []string + + // translate the full key names, which includes the GUN, into just the key IDs + allCanonicalKeyIDs := make(map[string]struct{}) + for fullKeyID := range repo.CryptoService.ListAllKeys() { + allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} + } + + allDelegationRoles, err := repo.GetDelegationRoles() + if err != nil { + return err + } + + // if there are no delegation roles, then just try to sign it into the targets role + if len(allDelegationRoles) == 0 { + return repo.AddTarget(target, data.CanonicalTargetsRole) + } + + // there are delegation roles, find every delegation role we have a key for, and + // attempt to sign into into all those roles. + for _, delegationRole := range allDelegationRoles { + // We do not support signing any delegation role that isn't a direct child of the targets role. + // Also don't bother checking the keys if we can't add the target + // to this role due to path restrictions + if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { + continue + } + + for _, canonicalKeyID := range delegationRole.KeyIDs { + if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { + signableRoles = append(signableRoles, delegationRole.Name) + break + } + } + } + + if len(signableRoles) == 0 { + return errors.Errorf("no valid signing keys for delegation roles") + } + + return repo.AddTarget(target, signableRoles...) +} + +// imagePushPrivileged push the image +func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref reference.Reference, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + } + + return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options) +} + +// trustedPull handles content trust pulling of an image +func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + var refs []target + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return err + } + + if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { + // List all targets + targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(ref.Name(), err) + } + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.Out(), "Skipping target for %q\n", reference.FamiliarName(ref)) + continue + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { + continue + } + refs = append(refs, t) + } + if len(refs) == 0 { + return trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name())) + } + } else { + t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(ref.Name(), err) + } + // Only get the tag if it's in the top level 
targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + r, err := convertTarget(t.Target) + if err != nil { + return err + + } + refs = append(refs, r) + } + + for i, r := range refs { + displayTag := r.name + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest) + + trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest) + if err != nil { + return err + } + if err := imagePullPrivileged(ctx, cli, authConfig, reference.FamiliarString(trustedRef), requestPrivilege, false); err != nil { + return err + } + + tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name) + if err != nil { + return err + } + + if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { + return err + } + } + return nil +} + +// imagePullPrivileged pulls the image and displays it to the output +func imagePullPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + All: all, + } + + responseBody, err := cli.Client().ImagePull(ctx, ref, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) +} + +// TrustedReference returns the canonical trusted reference for an image reference +func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { + var ( + repoInfo *registry.RepositoryInfo + err error + ) + if rs != nil { + repoInfo, err = rs.ResolveRepository(ref) + } else { + repoInfo, err = registry.ParseRepositoryInfo(ref) + } + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.Name.Name(), err) + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", ref.Tag())) + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + + return reference.WithDigest(reference.TrimNamed(ref), r.digest) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + name: t.Name, + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +// TagTrusted 
tags a trusted ref +// nolint: interfacer +func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canonical, ref reference.NamedTagged) error { + // Use familiar references when interacting with client and output + familiarRef := reference.FamiliarString(ref) + trustedFamiliarRef := reference.FamiliarString(trustedRef) + + fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef) + + return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef) +} diff --git a/vendor/github.com/docker/cli/cli/command/in.go b/vendor/github.com/docker/cli/cli/command/in.go new file mode 100644 index 000000000..54855c6dc --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/in.go @@ -0,0 +1,56 @@ +package command + +import ( + "errors" + "io" + "os" + "runtime" + + "github.com/docker/docker/pkg/term" +) + +// InStream is an input stream used by the DockerCli to read user input +type InStream struct { + CommonStream + in io.ReadCloser +} + +func (i *InStream) Read(p []byte) (int, error) { + return i.in.Read(p) +} + +// Close implements the Closer interface +func (i *InStream) Close() error { + return i.in.Close() +} + +// SetRawTerminal sets raw mode on the input terminal +func (i *InStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !i.CommonStream.isTerminal { + return nil + } + i.CommonStream.state, err = term.SetRawTerminal(i.CommonStream.fd) + return err +} + +// CheckTty checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !i.isTerminal { + eText := "the input device is not a TTY" + if runtime.GOOS == "windows" { + return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") + } + return errors.New(eText) + } + return nil +} + +// NewInStream returns a new InStream object from a ReadCloser +func NewInStream(in io.ReadCloser) *InStream { + fd, isTerminal := term.GetFdInfo(in) + return &InStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, in: in} +} diff --git a/vendor/github.com/docker/cli/cli/command/out.go b/vendor/github.com/docker/cli/cli/command/out.go new file mode 100644 index 000000000..27b44c235 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/out.go @@ -0,0 +1,50 @@ +package command + +import ( + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/term" +) + +// OutStream is an output stream used by the DockerCli to write normal program +// output. 
+type OutStream struct { + CommonStream + out io.Writer +} + +func (o *OutStream) Write(p []byte) (int, error) { + return o.out.Write(p) +} + +// SetRawTerminal sets raw mode on the input terminal +func (o *OutStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !o.CommonStream.isTerminal { + return nil + } + o.CommonStream.state, err = term.SetRawTerminalOutput(o.CommonStream.fd) + return err +} + +// GetTtySize returns the height and width in characters of the tty +func (o *OutStream) GetTtySize() (uint, uint) { + if !o.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(o.fd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return uint(ws.Height), uint(ws.Width) +} + +// NewOutStream returns a new OutStream object from a Writer +func NewOutStream(out io.Writer) *OutStream { + fd, isTerminal := term.GetFdInfo(out) + return &OutStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, out: out} +} diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go new file mode 100644 index 000000000..802b3a4b8 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/registry.go @@ -0,0 +1,189 @@ +package command + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +// ElectAuthServer returns the default registry to use (by asking the daemon) +func ElectAuthServer(ctx context.Context, cli Cli) string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.Client().Info(ctx); err != nil { + fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else if info.IndexServerAddress == "" { + fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. 
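// Illustrative sketch (not from the vendored sources): combining
// EncodeAuthToBase64 above with the Docker API client to authenticate a push.
// The image name and credentials are placeholders.
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Serialize the auth configuration the same way the commands above do.
	encodedAuth, err := command.EncodeAuthToBase64(types.AuthConfig{
		Username: "someuser",     // placeholder
		Password: "somepassword", // placeholder
	})
	if err != nil {
		panic(err)
	}

	// The encoded payload is sent as the X-Registry-Auth header.
	rc, err := apiClient.ImagePush(context.Background(), "example.com/app:latest", types.ImagePushOptions{
		RegistryAuth: encodedAuth,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // raw JSON progress stream
}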
+func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) + authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(authConfig) + } +} + +// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = ElectAuthServer(ctx, cli) + } + + a, _ := cli.CredentialsStore(configKey).Get(configKey) + return a +} + +// ConfigureAuth returns an AuthConfig from the specified user, password and server. +func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.SetIn(NewInStream(os.Stdin)) + } + + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + + authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) + if err != nil { + return authconfig, err + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. + if flPassword == "" && !cli.In().IsTerminal() { + return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return authconfig, errors.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return authconfig, errors.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) { + // Retrieve encoded auth token from the image reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNormalizedNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/vendor/github.com/docker/cli/cli/command/stream.go b/vendor/github.com/docker/cli/cli/command/stream.go new file mode 100644 index 000000000..71a43fa2e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/stream.go @@ -0,0 +1,34 @@ +package command + +import ( + "github.com/docker/docker/pkg/term" +) + +// CommonStream is an input stream used by the DockerCli to read user input +type CommonStream struct { + fd uintptr + isTerminal bool + state *term.State +} + +// FD returns the file descriptor number for this stream +func (s *CommonStream) FD() uintptr { + return s.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (s *CommonStream) IsTerminal() bool { + return s.isTerminal +} + +// RestoreTerminal restores normal mode to the terminal +func (s *CommonStream) RestoreTerminal() { + if s.state != nil { + term.RestoreTerminal(s.fd, s.state) + } +} + +// SetIsTerminal sets the boolean used for isTerminal +func (s *CommonStream) SetIsTerminal(isTerminal bool) { + s.isTerminal = isTerminal +} diff --git a/vendor/github.com/docker/cli/cli/command/trust.go b/vendor/github.com/docker/cli/cli/command/trust.go new file mode 100644 
index 000000000..c0742bc5b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/trust.go @@ -0,0 +1,43 @@ +package command + +import ( + "os" + "strconv" + + "github.com/spf13/pflag" +) + +var ( + // TODO: make this not global + untrusted bool +) + +// AddTrustVerificationFlags adds content trust flags to the provided flagset +func AddTrustVerificationFlags(fs *pflag.FlagSet) { + trusted := getDefaultTrustState() + fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image verification") +} + +// AddTrustSigningFlags adds "signing" flags to the provided flagset +func AddTrustSigningFlags(fs *pflag.FlagSet) { + trusted := getDefaultTrustState() + fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image signing") +} + +// getDefaultTrustState returns true if content trust is enabled through the $DOCKER_CONTENT_TRUST environment variable. +func getDefaultTrustState() bool { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + return trusted +} + +// IsTrusted returns true if content trust is enabled, either through the $DOCKER_CONTENT_TRUST environment variable, +// or through `--disabled-content-trust=false` on a command. +func IsTrusted() bool { + return !untrusted +} diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go new file mode 100644 index 000000000..3f4acaa2e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/utils.go @@ -0,0 +1,119 @@ +package command + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/system" +) + +// CopyToFile writes the content of the reader to the specified file +func CopyToFile(outfile string, r io.Reader) error { + // We use sequential file access here to avoid depleting the standby list + // on Windows. On Linux, this is a call directly to ioutil.TempFile + tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// capitalizeFirst capitalizes the first character of string +func capitalizeFirst(s string) string { + switch l := len(s); l { + case 0: + return s + case 1: + return strings.ToLower(s) + default: + return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) + } +} + +// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. +func PrettyPrint(i interface{}) string { + switch t := i.(type) { + case nil: + return "None" + case string: + return capitalizeFirst(t) + default: + return capitalizeFirst(fmt.Sprintf("%s", t)) + } +} + +// PromptForConfirmation requests and checks confirmation from user. +// This will display the provided message followed by ' [y/N] '. If +// the user input 'y' or 'Y' it returns true other false. If no +// message is provided "Are you sure you want to proceed? [y/N] " +// will be used instead. +func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool { + if message == "" { + message = "Are you sure you want to proceed?" 
+ } + message += " [y/N] " + + fmt.Fprintf(outs, message) + + // On Windows, force the use of the regular OS stdin stream. + if runtime.GOOS == "windows" { + ins = NewInStream(os.Stdin) + } + + reader := bufio.NewReader(ins) + answer, _, _ := reader.ReadLine() + return strings.ToLower(string(answer)) == "y" +} + +// PruneFilters returns consolidated prune filters obtained from config.json and cli +func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { + if dockerCli.ConfigFile() == nil { + return pruneFilters + } + for _, f := range dockerCli.ConfigFile().PruneFilters { + parts := strings.SplitN(f, "=", 2) + if len(parts) != 2 { + continue + } + if parts[0] == "label" { + // CLI label filter supersede config.json. + // If CLI label filter conflict with config.json, + // skip adding label! filter in config.json. + if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) { + continue + } + } else if parts[0] == "label!" { + // CLI label! filter supersede config.json. + // If CLI label! filter conflict with config.json, + // skip adding label filter in config.json. + if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) { + continue + } + } + pruneFilters.Add(parts[0], parts[1]) + } + + return pruneFilters +} diff --git a/vendor/github.com/docker/cli/cli/error.go b/vendor/github.com/docker/cli/cli/error.go new file mode 100644 index 000000000..62f62433b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. +// Useful in a loop if you don't want to return the error right away and you want to display after the loop, +// all the errors that happened during the loop. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. 
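// Illustrative sketch (not from the vendored sources): using the Errors type
// defined above to accumulate failures across a loop and report them together.
// The removeAll helper is invented here for demonstration.
package main

import (
	"fmt"

	"github.com/docker/cli/cli"
)

func removeAll(names []string, remove func(string) error) error {
	var errs cli.Errors
	for _, name := range names {
		if err := remove(name); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errs // Error() joins the individual messages with ", "
	}
	return nil
}

func main() {
	err := removeAll([]string{"a", "b"}, func(name string) error {
		return fmt.Errorf("cannot remove %q", name)
	})
	fmt.Println(err) // cannot remove "a", cannot remove "b"
}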
+type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/cli/cli/required.go b/vendor/github.com/docker/cli/cli/required.go new file mode 100644 index 000000000..d28af86be --- /dev/null +++ b/vendor/github.com/docker/cli/cli/required.go @@ -0,0 +1,96 @@ +package cli + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return errors.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} + +// RequiresMinArgs returns an error if there is not at least min args +func RequiresMinArgs(min int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min { + return nil + } + return errors.Errorf( + "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresMaxArgs returns an error if there is not at most max args +func RequiresMaxArgs(max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) <= max { + return nil + } + return errors.Errorf( + "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresRangeArgs returns an error if there is not at least min args and at most max args +func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min && len(args) <= max { + return nil + } + return errors.Errorf( + "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// ExactArgs returns an error if there is not the exact number of args +func ExactArgs(number int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) == number { + return nil + } + return errors.Errorf( + "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + number, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} diff --git a/vendor/github.com/docker/cli/cli/version.go b/vendor/github.com/docker/cli/cli/version.go new file mode 100644 index 000000000..bff7ab49e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/version.go @@ -0,0 +1,9 @@ +package cli + +// Default build-time variable. +// These values are overriding via ldflags +var ( + Version = "unknown-version" + GitCommit = "unknown-commit" + BuildTime = "unknown-buildtime" +) diff --git a/vendor/github.com/docker/distribution/registry/doc.go b/vendor/github.com/docker/distribution/registry/doc.go new file mode 100644 index 000000000..a1ba7f3ab --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/doc.go @@ -0,0 +1,2 @@ +// Package registry provides the main entrypoints for running a registry. 
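// Illustrative sketch (not from the vendored sources): the build-time
// variables above are plain package-level strings, so a release build can
// override them with -ldflags -X. The import path in the comment is an
// assumption based on this vendor layout.
package main

import (
	"fmt"

	"github.com/docker/cli/cli"
)

func main() {
	// Built normally, these print the "unknown-*" defaults. A release build
	// would typically inject real values, e.g.:
	//   go build -ldflags "-X github.com/docker/cli/cli.Version=17.06.0 \
	//     -X github.com/docker/cli/cli.GitCommit=deadbeef"
	fmt.Println(cli.Version, cli.GitCommit, cli.BuildTime)
}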
+package registry diff --git a/vendor/github.com/docker/distribution/registry/registry.go b/vendor/github.com/docker/distribution/registry/registry.go new file mode 100644 index 000000000..ee3d6b0bd --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/registry.go @@ -0,0 +1,356 @@ +package registry + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "time" + + "rsc.io/letsencrypt" + + log "github.com/Sirupsen/logrus" + logstash "github.com/bshuster-repo/logrus-logstash-hook" + "github.com/bugsnag/bugsnag-go" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + "github.com/docker/distribution/uuid" + "github.com/docker/distribution/version" + gorhandlers "github.com/gorilla/handlers" + "github.com/spf13/cobra" + "github.com/yvasiyarov/gorelic" +) + +// ServeCmd is a cobra command for running the registry. +var ServeCmd = &cobra.Command{ + Use: "serve ", + Short: "`serve` stores and distributes Docker images", + Long: "`serve` stores and distributes Docker images.", + Run: func(cmd *cobra.Command, args []string) { + + // setup context + ctx := context.WithVersion(context.Background(), version.Version) + + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + if config.HTTP.Debug.Addr != "" { + go func(addr string) { + log.Infof("debug server listening %v", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("error listening on debug interface: %v", err) + } + }(config.HTTP.Debug.Addr) + } + + registry, err := NewRegistry(ctx, config) + if err != nil { + log.Fatalln(err) + } + + if err = registry.ListenAndServe(); err != nil { + log.Fatalln(err) + } + }, +} + +// A Registry represents a complete instance of the registry. +// TODO(aaronl): It might make sense for Registry to become an interface. +type Registry struct { + config *configuration.Configuration + app *handlers.App + server *http.Server +} + +// NewRegistry creates a new registry from a context and configuration struct. +func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { + var err error + ctx, err = configureLogging(ctx, config) + if err != nil { + return nil, fmt.Errorf("error configuring logger: %v", err) + } + + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. + uuid.Loggerf = context.GetLogger(ctx).Warnf + + app := handlers.NewApp(ctx, config) + // TODO(aaronl): The global scope of the health checks means NewRegistry + // can only be called once per process. + app.RegisterHealthChecks() + handler := configureReporting(app) + handler = alive("/", handler) + handler = health.Handler(handler) + handler = panicHandler(handler) + if !config.Log.AccessLog.Disabled { + handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) + } + + server := &http.Server{ + Handler: handler, + } + + return &Registry{ + app: app, + config: config, + server: server, + }, nil +} + +// ListenAndServe runs the registry's HTTP server. 
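// Illustrative sketch (not from the vendored sources): embedding the registry
// programmatically instead of going through ServeCmd, using the exported
// NewRegistry and ListenAndServe defined in this file. The config path is a
// placeholder.
package main

import (
	"log"
	"os"

	"github.com/docker/distribution/configuration"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry"
	"github.com/docker/distribution/version"
)

func main() {
	fp, err := os.Open("/etc/docker/registry/config.yml") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer fp.Close()

	config, err := configuration.Parse(fp)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.WithVersion(context.Background(), version.Version)
	reg, err := registry.NewRegistry(ctx, config)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(reg.ListenAndServe())
}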
+func (registry *Registry) ListenAndServe() error { + config := registry.config + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + return err + } + + if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: nextProtos(config), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + if config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + if config.HTTP.TLS.Certificate != "" { + return fmt.Errorf("cannot specify both certificate and Let's Encrypt") + } + var m letsencrypt.Manager + if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { + return err + } + if !m.Registered() { + if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { + return err + } + } + tlsConf.GetCertificate = m.GetCertificate + } else { + tlsConf.Certificates = make([]tls.Certificate, 1) + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + return err + } + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + return err + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + return fmt.Errorf("Could not add CA to pool") + } + } + + for _, subj := range pool.Subjects() { + context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + ln = tls.NewListener(ln, tlsConf) + context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) + } else { + context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) + } + + return registry.server.Serve(ln) +} + +func configureReporting(app *handlers.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} + +// configureLogging prepares the context with a logger using the +// configuration. 
+func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { + if config.Log.Level == "" && config.Log.Formatter == "" { + // If no config for logging is set, fallback to deprecated "Loglevel". + log.SetLevel(logLevel(config.Loglevel)) + ctx = context.WithLogger(ctx, context.GetLogger(ctx)) + return ctx, nil + } + + log.SetLevel(logLevel(config.Log.Level)) + + formatter := config.Log.Formatter + if formatter == "" { + formatter = "text" // default formatter + } + + switch formatter { + case "json": + log.SetFormatter(&log.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "text": + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "logstash": + log.SetFormatter(&logstash.LogstashFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + default: + // just let the library use default on empty string. + if config.Log.Formatter != "" { + return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) + } + } + + if config.Log.Formatter != "" { + log.Debugf("using %q logging formatter", config.Log.Formatter) + } + + if len(config.Log.Fields) > 0 { + // build up the static fields, if present. + var fields []interface{} + for k := range config.Log.Fields { + fields = append(fields, k) + } + + ctx = context.WithValues(ctx, config.Log.Fields) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) + } + + return ctx, nil +} + +func logLevel(level configuration.Loglevel) log.Level { + l, err := log.ParseLevel(string(level)) + if err != nil { + l = log.InfoLevel + log.Warnf("error parsing level %q: %v, using %q ", level, err, l) + } + + return l +} + +// panicHandler add an HTTP handler to web app. The handler recover the happening +// panic. logrus.Panic transmits panic message to pre-config log hooks, which is +// defined in config.yml. +func panicHandler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Panic(fmt.Sprintf("%v", err)) + } + }() + handler.ServeHTTP(w, r) + }) +} + +// alive simply wraps the handler with a route that always returns an http 200 +// response when the path is matched. If the path is not matched, the request +// is passed to the provided handler. There is no guarantee of anything but +// that the server is up. Wrap with other handlers (such as health.Handler) +// for greater affect. 
+func alive(path string, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == path { + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + return + } + + handler.ServeHTTP(w, r) + }) +} + +func resolveConfiguration(args []string) (*configuration.Configuration, error) { + var configurationPath string + + if len(args) > 0 { + configurationPath = args[0] + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + defer fp.Close() + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} + +func nextProtos(config *configuration.Configuration) []string { + switch config.HTTP.HTTP2.Disabled { + case true: + return []string{"http/1.1"} + default: + return []string{"h2", "http/1.1"} + } +} diff --git a/vendor/github.com/docker/distribution/registry/root.go b/vendor/github.com/docker/distribution/registry/root.go new file mode 100644 index 000000000..5d3005c26 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/root.go @@ -0,0 +1,84 @@ +package registry + +import ( + "fmt" + "os" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" + "github.com/docker/libtrust" + "github.com/spf13/cobra" +) + +var showVersion bool + +func init() { + RootCmd.AddCommand(ServeCmd) + RootCmd.AddCommand(GCCmd) + GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") + RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") +} + +// RootCmd is the main command for the 'registry' binary. 
+var RootCmd = &cobra.Command{ + Use: "registry", + Short: "`registry`", + Long: "`registry`", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + cmd.Usage() + }, +} + +var dryRun bool + +// GCCmd is the cobra command that corresponds to the garbage-collect subcommand +var GCCmd = &cobra.Command{ + Use: "garbage-collect ", + Short: "`garbage-collect` deletes layers not referenced by any manifests", + Long: "`garbage-collect` deletes layers not referenced by any manifests", + Run: func(cmd *cobra.Command, args []string) { + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err) + os.Exit(1) + } + + ctx := context.Background() + ctx, err = configureLogging(ctx, config) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) + os.Exit(1) + } + + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + + registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) + os.Exit(1) + } + + err = storage.MarkAndSweep(ctx, driver, registry, dryRun) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) + os.Exit(1) + } + }, +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go new file mode 100644 index 000000000..fad0a77a1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go @@ -0,0 +1,60 @@ +package storage + +import ( + "expvar" + "sync/atomic" + + "github.com/docker/distribution/registry/storage/cache" +) + +type blobStatCollector struct { + metrics cache.Metrics +} + +func (bsc *blobStatCollector) Hit() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Hits, 1) +} + +func (bsc *blobStatCollector) Miss() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Misses, 1) +} + +func (bsc *blobStatCollector) Metrics() cache.Metrics { + return bsc.metrics +} + +// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor +// cache requests. Note this is kept globally and made available via expvar. +// For more detailed metrics, its recommend to instrument a particular cache +// implementation. +var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly. 
+ return blobStatterCacheMetrics + })) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobserver.go b/vendor/github.com/docker/distribution/registry/storage/blobserver.go new file mode 100644 index 000000000..739bf3cb3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobserver.go @@ -0,0 +1,78 @@ +package storage + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +// TODO(stevvooe): This should configurable in the future. +const blobCacheControlMaxAge = 365 * 24 * time.Hour + +// blobServer simply serves blobs from a driver instance using a path function +// to identify paths and a descriptor service to fill in metadata. +type blobServer struct { + driver driver.StorageDriver + statter distribution.BlobStatter + pathFn func(dgst digest.Digest) (string, error) + redirect bool // allows disabling URLFor redirects +} + +func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return err + } + + path, err := bs.pathFn(desc.Digest) + if err != nil { + return err + } + + if bs.redirect { + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + switch err.(type) { + case nil: + // Redirect to storage URL. + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + return err + + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + default: + // Some unexpected error. + return err + } + } + + br, err := newFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + return err + } + defer br.Close() + + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } + + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. + w.Header().Set("Content-Type", desc.MediaType) + } + + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) + } + + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go new file mode 100644 index 000000000..9f9071ca6 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobstore.go @@ -0,0 +1,223 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +// blobStore implements the read side of the blob store interface over a +// driver without enforcing per-repository membership. This object is +// intentionally a leaky abstraction, providing utility methods that support +// creating and traversing backend links. +type blobStore struct { + driver driver.StorageDriver + statter distribution.BlobStatter +} + +var _ distribution.BlobProvider = &blobStore{} + +// Get implements the BlobReadService.Get call. 
+func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + bp, err := bs.path(dgst) + if err != nil { + return nil, err + } + + p, err := bs.driver.GetContent(ctx, bp) + if err != nil { + switch err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUnknown + } + + return nil, err + } + + return p, err +} + +func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + path, err := bs.path(desc.Digest) + if err != nil { + return nil, err + } + + return newFileReader(ctx, bs.driver, path, desc.Size) +} + +// Put stores the content p in the blob store, calculating the digest. If the +// content is already present, only the digest will be returned. This should +// only be used for small objects, such as manifests. This implemented as a convenience for other Put implementations +func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + desc, err := bs.statter.Stat(ctx, dgst) + if err == nil { + // content already present + return desc, nil + } else if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) + // real error, return it + return distribution.Descriptor{}, err + } + + bp, err := bs.path(dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype here, as well. + return distribution.Descriptor{ + Size: int64(len(p)), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, bs.driver.PutContent(ctx, bp, p) +} + +func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { + + specPath, err := pathFor(blobsPathSpec{}) + if err != nil { + return err + } + + err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { + // skip directories + if fileInfo.IsDir() { + return nil + } + + currentPath := fileInfo.Path() + // we only want to parse paths that end with /data + _, fileName := path.Split(currentPath) + if fileName != "data" { + return nil + } + + digest, err := digestFromPath(currentPath) + if err != nil { + return err + } + + return ingester(digest) + }) + return err +} + +// path returns the canonical path for the blob identified by digest. The blob +// may or may not exist. +func (bs *blobStore) path(dgst digest.Digest) (string, error) { + bp, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return "", err + } + + return bp, nil +} + +// link links the path to the provided digest by writing the digest into the +// target file. Caller must ensure that the blob actually exists. +func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { + // The contents of the "link" file are the exact string contents of the + // digest, which is specified in that package. + return bs.driver.PutContent(ctx, path, []byte(dgst)) +} + +// readlink returns the linked digest at path. 
+func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { + content, err := bs.driver.GetContent(ctx, path) + if err != nil { + return "", err + } + + linked, err := digest.Parse(string(content)) + if err != nil { + return "", err + } + + return linked, nil +} + +// resolve reads the digest link at path and returns the blob store path. +func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { + dgst, err := bs.readlink(ctx, path) + if err != nil { + return "", err + } + + return bs.path(dgst) +} + +type blobStatter struct { + driver driver.StorageDriver +} + +var _ distribution.BlobDescriptorService = &blobStatter{} + +// Stat implements BlobStatter.Stat by returning the descriptor for the blob +// in the main blob store. If this method returns successfully, there is +// strong guarantee that the blob exists and is available. +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + path, err := pathFor(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + fi, err := bs.driver.Stat(ctx, path) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrBlobUnknown + default: + return distribution.Descriptor{}, err + } + } + + if fi.IsDir() { + // NOTE(stevvooe): This represents a corruption situation. Somehow, we + // calculated a blob path and then detected a directory. We log the + // error and then error on the side of not knowing about the blob. + context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + // TODO(stevvooe): Add method to resolve the mediatype. We can store and + // cache a "global" media type for the blob, even if a specific repo has a + // mediatype that overrides the main one. + + return distribution.Descriptor{ + Size: fi.Size(), + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + }, nil +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go new file mode 100644 index 000000000..d51e27ad3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go @@ -0,0 +1,400 @@ +package storage + +import ( + "errors" + "fmt" + "io" + "path" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +var ( + errResumableDigestNotAvailable = errors.New("resumable digest not available") +) + +const ( + // digestSha256Empty is the canonical sha256 digest of empty data + digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +// blobWriter is used to control the various aspects of resumable +// blob upload. 
+type blobWriter struct { + ctx context.Context + blobStore *linkedBlobStore + + id string + startedAt time.Time + digester digest.Digester + written int64 // track the contiguous write + + fileWriter storagedriver.FileWriter + driver storagedriver.StorageDriver + path string + + resumableDigestEnabled bool + committed bool +} + +var _ distribution.BlobWriter = &blobWriter{} + +// ID returns the identifier for this upload. +func (bw *blobWriter) ID() string { + return bw.id +} + +func (bw *blobWriter) StartedAt() time.Time { + return bw.startedAt +} + +// Commit marks the upload as completed, returning a valid descriptor. The +// final size and digest are checked against the first descriptor provided. +func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + context.GetLogger(ctx).Debug("(*blobWriter).Commit") + + if err := bw.fileWriter.Commit(); err != nil { + return distribution.Descriptor{}, err + } + + bw.Close() + desc.Size = bw.Size() + + canonical, err := bw.validateBlob(ctx, desc) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.moveBlob(ctx, canonical); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { + return distribution.Descriptor{}, err + } + + if err := bw.removeResources(ctx); err != nil { + return distribution.Descriptor{}, err + } + + err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) + if err != nil { + return distribution.Descriptor{}, err + } + + bw.committed = true + return canonical, nil +} + +// Cancel the blob upload process, releasing any resources associated with +// the writer and canceling the operation. +func (bw *blobWriter) Cancel(ctx context.Context) error { + context.GetLogger(ctx).Debug("(*blobWriter).Cancel") + if err := bw.fileWriter.Cancel(); err != nil { + return err + } + + if err := bw.Close(); err != nil { + context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) + } + + if err := bw.removeResources(ctx); err != nil { + return err + } + + return nil +} + +func (bw *blobWriter) Size() int64 { + return bw.fileWriter.Size() +} + +func (bw *blobWriter) Write(p []byte) (int, error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) + bw.written += int64(n) + + return n, err +} + +func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. 
+ if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return 0, err + } + + nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) + bw.written += nn + + return nn, err +} + +func (bw *blobWriter) Close() error { + if bw.committed { + return errors.New("blobwriter close after commit") + } + + if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { + return err + } + + return bw.fileWriter.Close() +} + +// validateBlob checks the data against the digest, returning an error if it +// does not match. The canonical descriptor is returned. +func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + var ( + verified, fullHash bool + canonical digest.Digest + ) + + if desc.Digest == "" { + // if no descriptors are provided, we have nothing to validate + // against. We don't really want to support this for the registry. + return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ + Reason: fmt.Errorf("cannot validate against empty digest"), + } + } + + var size int64 + + // Stat the on disk file + if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): We really don't care if the file is + // not actually present for the reader. We now assume + // that the desc length is zero. + desc.Size = 0 + default: + // Any other error we want propagated up the stack. + return distribution.Descriptor{}, err + } + } else { + if fi.IsDir() { + return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) + } + + size = fi.Size() + } + + if desc.Size > 0 { + if desc.Size != size { + return distribution.Descriptor{}, distribution.ErrBlobInvalidLength + } + } else { + // if provided 0 or negative length, we can assume caller doesn't know or + // care about length. + desc.Size = size + } + + // TODO(stevvooe): This section is very meandering. Need to be broken down + // to be a lot more clear. + + if err := bw.resumeDigest(ctx); err == nil { + canonical = bw.digester.Digest() + + if canonical.Algorithm() == desc.Digest.Algorithm() { + // Common case: client and server prefer the same canonical digest + // algorithm - currently SHA256. + verified = desc.Digest == canonical + } else { + // The client wants to use a different digest algorithm. They'll just + // have to be patient and wait for us to download and re-hash the + // uploaded content using that digest algorithm. + fullHash = true + } + } else if err == errResumableDigestNotAvailable { + // Not using resumable digests, so we need to hash the entire layer. + fullHash = true + } else { + return distribution.Descriptor{}, err + } + + if fullHash { + // a fantastic optimization: if the the written data and the size are + // the same, we don't need to read the data from the backend. This is + // because we've written the entire file in the lifecycle of the + // current instance. + if bw.written == size && digest.Canonical == desc.Digest.Algorithm() { + canonical = bw.digester.Digest() + verified = desc.Digest == canonical + } + + // If the check based on size fails, we fall back to the slowest of + // paths. We may be able to make the size-based check a stronger + // guarantee, so this may be defensive. 
+		if !verified {
+			digester := digest.Canonical.Digester()
+			verifier := desc.Digest.Verifier()
+
+			// Read the file from the backend driver and validate it.
+			fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+			defer fr.Close()
+
+			tr := io.TeeReader(fr, digester.Hash())
+
+			if _, err := io.Copy(verifier, tr); err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			canonical = digester.Digest()
+			verified = verifier.Verified()
+		}
+	}
+
+	if !verified {
+		context.GetLoggerWithFields(ctx,
+			map[interface{}]interface{}{
+				"canonical": canonical,
+				"provided":  desc.Digest,
+			}, "canonical", "provided").
+			Errorf("canonical digest does not match provided digest")
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Digest: desc.Digest,
+			Reason: fmt.Errorf("content does not match digest"),
+		}
+	}
+
+	// update desc with canonical hash
+	desc.Digest = canonical
+
+	if desc.MediaType == "" {
+		desc.MediaType = "application/octet-stream"
+	}
+
+	return desc, nil
+}
+
+// moveBlob moves the data into its final, hash-qualified destination,
+// identified by dgst. The layer should be validated before commencing the
+// move.
+func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
+	blobPath, err := pathFor(blobDataPathSpec{
+		digest: desc.Digest,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Check for existence
+	if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			break // ensure that it doesn't exist.
+		default:
+			return err
+		}
+	} else {
+		// If the path exists, we can assume that the content has already
+		// been uploaded, since the blob storage is content-addressable.
+		// While it may be corrupted, detection of such corruption belongs
+		// elsewhere.
+		return nil
+	}
+
+	// If no data was received, we may not actually have a file on disk. Check
+	// the size here and write a zero-length file to blobPath if this is the
+	// case. For the most part, this should only ever happen with zero-length
+	// blobs.
+	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// HACK(stevvooe): This is slightly dangerous: if we verify above,
+			// get a hash, then the underlying file is deleted, we risk moving
+			// a zero-length blob into a nonzero-length blob location. To
+			// prevent this horrid thing, we employ the hack of only allowing
+			// this to happen for the digest of an empty blob.
+			if desc.Digest == digestSha256Empty {
+				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
+			}
+
+			// We let this fail during the move below.
+			logrus.
+				WithField("upload.id", bw.ID()).
+				WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
+		default:
+			return err // unrelated error
+		}
+	}
+
+	// TODO(stevvooe): We should also write the mediatype when executing this move.
+
+	return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
+}
+
+// removeResources should clean up all resources associated with the upload
+// instance. An error will be returned if the clean up cannot proceed. If the
+// resources are already not present, no error will be returned.
+func (bw *blobWriter) removeResources(ctx context.Context) error { + dataPath, err := pathFor(uploadDataPathSpec{ + name: bw.blobStore.repository.Named().Name(), + id: bw.id, + }) + + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // already gone! + default: + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. + context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go new file mode 100644 index 000000000..32f130974 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go @@ -0,0 +1,17 @@ +// +build noresumabledigest + +package storage + +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. +func (bw *blobWriter) storeHashState(ctx context.Context) error { + return errResumableDigestNotAvailable +} diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go new file mode 100644 index 000000000..ff5482c3f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -0,0 +1,145 @@ +// +build !noresumabledigest + +package storage + +import ( + "fmt" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/stevvooe/resumable" + + // register resumable hashes with import + _ "github.com/stevvooe/resumable/sha256" + _ "github.com/stevvooe/resumable/sha512" +) + +// resumeDigest attempts to restore the state of the internal hash function +// by loading the most recent saved hash state equal to the current size of the blob. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + offset := bw.fileWriter.Size() + if offset == int64(h.Len()) { + // State of digester is already at the requested offset. 
+ return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + h.Reset() + } else { + storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) + if err != nil { + return err + } + + if err = h.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. + if gapLen := offset - int64(h.Len()); gapLen > 0 { + return errResumableDigestNotAvailable + } + + return nil +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. +func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + list: true, + }) + + if err != nil { + return nil, err + } + + paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ + name: bw.blobStore.repository.Named().String(), + id: bw.id, + alg: bw.digester.Digest().Algorithm(), + offset: int64(h.Len()), + }) + + if err != nil { + return err + } + + hashState, err := h.State() + if err != nil { + return err + } + + return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog.go b/vendor/github.com/docker/distribution/registry/storage/catalog.go new file mode 100644 index 000000000..0b59a39ac --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/catalog.go @@ -0,0 +1,153 @@ +package storage + +import ( + "errors" + "io" + "path" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// errFinishedWalk signals an early exit to the walk when the current query +// is satisfied. +var errFinishedWalk = errors.New("finished walk") + +// Returns a list, or partial list, of repositories in the registry. +// Because it's a quite expensive operation, it should only be used when building up +// an initial set of repositories. 
+func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + var foundRepos []string + + if len(repos) == 0 { + return 0, errors.New("no space in slice") + } + + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return 0, err + } + + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + err := handleRepository(fileInfo, root, last, func(repoPath string) error { + foundRepos = append(foundRepos, repoPath) + return nil + }) + if err != nil { + return err + } + + // if we've filled our array, no need to walk any further + if len(foundRepos) == len(repos) { + return errFinishedWalk + } + + return nil + }) + + n = copy(repos, foundRepos) + + switch err { + case nil: + // nil means that we completed walk and didn't fill buffer. No more + // records are available. + err = io.EOF + case errFinishedWalk: + // more records are available. + err = nil + } + + return n, err +} + +// Enumerate applies ingester to each repository +func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error { + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + return handleRepository(fileInfo, root, "", ingester) + }) + + return err +} + +// lessPath returns true if one path a is less than path b. +// +// A component-wise comparison is done, rather than the lexical comparison of +// strings. +func lessPath(a, b string) bool { + // we provide this behavior by making separator always sort first. + return compareReplaceInline(a, b, '/', '\x00') < 0 +} + +// compareReplaceInline modifies runtime.cmpstring to replace old with new +// during a byte-wise comparison. +func compareReplaceInline(s1, s2 string, old, new byte) int { + // TODO(stevvooe): We are missing an optimization when the s1 and s2 have + // the exact same slice header. It will make the code unsafe but can + // provide some extra performance. + + l := len(s1) + if len(s2) < l { + l = len(s2) + } + + for i := 0; i < l; i++ { + c1, c2 := s1[i], s2[i] + if c1 == old { + c1 = new + } + + if c2 == old { + c2 = new + } + + if c1 < c2 { + return -1 + } + + if c1 > c2 { + return +1 + } + } + + if len(s1) < len(s2) { + return -1 + } + + if len(s1) > len(s2) { + return +1 + } + + return 0 +} + +// handleRepository calls function fn with a repository path if fileInfo +// has a path of a repository under root and that it is lexographically +// after last. Otherwise, it will return ErrSkipDir. This should be used +// with Walk to do handling with repositories in a storage. +func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error { + filePath := fileInfo.Path() + + // lop the base path off + repo := filePath[len(root)+1:] + + _, file := path.Split(repo) + if file == "_layers" { + repo = strings.TrimSuffix(repo, "/_layers") + if lessPath(last, repo) { + if err := fn(repo); err != nil { + return err + } + } + return ErrSkipDir + } else if strings.HasPrefix(file, "_") { + return ErrSkipDir + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/doc.go b/vendor/github.com/docker/distribution/registry/storage/doc.go new file mode 100644 index 000000000..387d92348 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/doc.go @@ -0,0 +1,3 @@ +// Package storage contains storage services for use in the registry +// application. 
It should be considered an internal package, as of Go 1.4.
+package storage
diff --git a/vendor/github.com/docker/distribution/registry/storage/filereader.go b/vendor/github.com/docker/distribution/registry/storage/filereader.go
new file mode 100644
index 000000000..3b06c8179
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/filereader.go
@@ -0,0 +1,177 @@
+package storage
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// TODO(stevvooe): Set an optimal buffer size here. We'll have to
+// understand the latency characteristics of the underlying network to
+// set this correctly, so we may want to leave it to the driver. For
+// out of process drivers, we'll have to optimize this buffer size for
+// local communication.
+const fileReaderBufferSize = 4 << 20
+
+// fileReader provides a read seeker interface to files stored in
+// storagedriver. Used to implement part of layer interface and will be used
+// to implement read side of LayerUpload.
+type fileReader struct {
+	driver storagedriver.StorageDriver
+
+	ctx context.Context
+
+	// identifying fields
+	path string
+	size int64 // size is the total size, must be set.
+
+	// mutable fields
+	rc     io.ReadCloser // remote read closer
+	brd    *bufio.Reader // internal buffered io
+	offset int64         // offset is the current read offset
+	err    error         // terminal error, if set, reader is closed
+}
+
+// newFileReader initializes a file reader for the remote file. The reader
+// takes on the size and path that must be determined externally with a stat
+// call. The reader operates optimistically, assuming that the file is already
+// there.
+func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) {
+	return &fileReader{
+		ctx:    ctx,
+		driver: driver,
+		path:   path,
+		size:   size,
+	}, nil
+}
+
+func (fr *fileReader) Read(p []byte) (n int, err error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	rd, err := fr.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	fr.offset += int64(n)
+
+	// Simulate io.EOF error if we reach filesize.
+	if err == nil && fr.offset >= fr.size {
+		err = io.EOF
+	}
+
+	return n, err
+}
+
+func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	var err error
+	newOffset := fr.offset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += int64(offset)
+	case os.SEEK_END:
+		newOffset = fr.size + int64(offset)
+	case os.SEEK_SET:
+		newOffset = int64(offset)
+	}
+
+	if newOffset < 0 {
+		err = fmt.Errorf("cannot seek to negative position")
+	} else {
+		if fr.offset != newOffset {
+			fr.reset()
+		}
+
+		// No problems, set the offset.
+		fr.offset = newOffset
+	}
+
+	return fr.offset, err
+}
+
+func (fr *fileReader) Close() error {
+	return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
+}
+
+// reader prepares the current reader at the lrs offset, ensuring it's buffered
+// and ready to go.
+func (fr *fileReader) reader() (io.Reader, error) {
+	if fr.err != nil {
+		return nil, fr.err
+	}
+
+	if fr.rc != nil {
+		return fr.brd, nil
+	}
+
+	// If we don't have a reader, open one up.
+ rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): If the path is not found, we simply return a + // reader that returns io.EOF. However, we do not set fr.rc, + // allowing future attempts at getting a reader to possibly + // succeed if the file turns up later. + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + default: + return nil, err + } + } + + fr.rc = rc + + if fr.brd == nil { + fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) + } else { + fr.brd.Reset(fr.rc) + } + + return fr.brd, nil +} + +// resetReader resets the reader, forcing the read method to open up a new +// connection and rebuild the buffered reader. This should be called when the +// offset and the reader will become out of sync, such as during a seek +// operation. +func (fr *fileReader) reset() { + if fr.err != nil { + return + } + if fr.rc != nil { + fr.rc.Close() + fr.rc = nil + } +} + +func (fr *fileReader) closeWithErr(err error) error { + if fr.err != nil { + return fr.err + } + + fr.err = err + + // close and release reader chain + if fr.rc != nil { + fr.rc.Close() + } + + fr.rc = nil + fr.brd = nil + + return fr.err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go new file mode 100644 index 000000000..392898933 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/garbagecollect.go @@ -0,0 +1,114 @@ +package storage + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +func emit(format string, a ...interface{}) { + fmt.Printf(format+"\n", a...) 
+} + +// MarkAndSweep performs a mark and sweep of registry data +func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { + repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) + if !ok { + return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") + } + + // mark + markSet := make(map[digest.Digest]struct{}) + err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + emit(repoName) + + var err error + named, err := reference.WithName(repoName) + if err != nil { + return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) + } + repository, err := registry.Repository(ctx, named) + if err != nil { + return fmt.Errorf("failed to construct repository: %v", err) + } + + manifestService, err := repository.Manifests(ctx) + if err != nil { + return fmt.Errorf("failed to construct manifest service: %v", err) + } + + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") + } + + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + // Mark the manifest's blob + emit("%s: marking manifest %s ", repoName, dgst) + markSet[dgst] = struct{}{} + + manifest, err := manifestService.Get(ctx, dgst) + if err != nil { + return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err) + } + + descriptors := manifest.References() + for _, descriptor := range descriptors { + markSet[descriptor.Digest] = struct{}{} + emit("%s: marking blob %s", repoName, descriptor.Digest) + } + + return nil + }) + + if err != nil { + // In certain situations such as unfinished uploads, deleting all + // tags in S3 or removing the _manifests folder manually, this + // error may be of type PathNotFound. + // + // In these cases we can continue marking other manifests safely. + if _, ok := err.(driver.PathNotFoundError); ok { + return nil + } + } + + return err + }) + + if err != nil { + return fmt.Errorf("failed to mark: %v", err) + } + + // sweep + blobService := registry.Blobs() + deleteSet := make(map[digest.Digest]struct{}) + err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { + // check if digest is in markSet. If not, delete it! 
+ if _, ok := markSet[dgst]; !ok { + deleteSet[dgst] = struct{}{} + } + return nil + }) + if err != nil { + return fmt.Errorf("error enumerating blobs: %v", err) + } + emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + // Construct vacuum + vacuum := NewVacuum(ctx, storageDriver) + for dgst := range deleteSet { + emit("blob eligible for deletion: %s", dgst) + if dryRun { + continue + } + err = vacuum.RemoveBlob(string(dgst)) + if err != nil { + return fmt.Errorf("failed to delete blob %s: %v", dgst, err) + } + } + + return err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go new file mode 100644 index 000000000..a1929eed3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -0,0 +1,470 @@ +package storage + +import ( + "fmt" + "net/http" + "path" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" + "github.com/opencontainers/go-digest" +) + +// linkPathFunc describes a function that can resolve a link based on the +// repository name and digest. +type linkPathFunc func(name string, dgst digest.Digest) (string, error) + +// linkedBlobStore provides a full BlobService that namespaces the blobs to a +// given repository. Effectively, it manages the links in a given repository +// that grant access to the global blob store. +type linkedBlobStore struct { + *blobStore + registry *registry + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool + resumableDigestEnabled bool + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. 
+ linkPathFns []linkPathFunc + + // linkDirectoryPathSpec locates the root directories in which one might find links + linkDirectoryPathSpec pathSpec +} + +var _ distribution.BlobStore = &linkedBlobStore{} + +func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return lbs.blobAccessController.Stat(ctx, dgst) +} + +func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Get(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return nil, err + } + + return lbs.blobStore.Open(ctx, canonical.Digest) +} + +func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + canonical, err := lbs.Stat(ctx, dgst) // access check + if err != nil { + return err + } + + if canonical.MediaType != "" { + // Set the repository local content type. + w.Header().Set("Content-Type", canonical.MediaType) + } + + return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) +} + +func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst := digest.FromBytes(p) + // Place the data in the blob store first. + desc, err := lbs.blobStore.Put(ctx, mediaType, p) + if err != nil { + context.GetLogger(ctx).Errorf("error putting into main store: %v", err) + return distribution.Descriptor{}, err + } + + if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): Write out mediatype if incoming differs from what is + // returned by Put above. Note that we should allow updates for a given + // repository. + + return desc, lbs.linkBlob(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +// Writer begins a blob write session, returning a handle. 
+func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } + + uuid := uuid.Generate().String() + startedAt := time.Now().UTC() + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := pathFor(uploadDataPathSpec{ + name: lbs.repository.Named().Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt, true) +} + +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error { + rootPath, err := pathFor(lbs.linkDirectoryPathSpec) + if err != nil { + return err + } + err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error { + // exit early if directory... 
+ if fileInfo.IsDir() { + return nil + } + filePath := fileInfo.Path() + + // check if it's a link + _, fileName := path.Split(filePath) + if fileName != "link" { + return nil + } + + // read the digest found in link + digest, err := lbs.blobStore.readlink(ctx, filePath) + if err != nil { + return err + } + + // ensure this conforms to the linkPathFns + _, err = lbs.Stat(ctx, digest) + if err != nil { + // we expect this error to occur so we move on + if err == distribution.ErrBlobUnknown { + return nil + } + return err + } + + err = ingestor(digest) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) { + var stat distribution.Descriptor + if sourceStat == nil { + // look up the blob info from the sourceRepo if not already provided + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err = repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + } else { + // use the provided blob info + stat = *sourceStat + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) +} + +// newBlobUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) { + fw, err := lbs.driver.Writer(ctx, path, append) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.Digester(), + fileWriter: fw, + driver: lbs.driver, + path: path, + resumableDigestEnabled: lbs.resumableDigestEnabled, + } + + return bw, nil +} + +// linkBlob links a valid, written blob into the registry under the named +// repository for the upload controller. +func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical.Digest}, aliases...) + + // TODO(stevvooe): Need to write out mediatype for only canonical hash + // since we don't care about the aliases. They are generally unused except + // for tarsum but those versions don't care about mediatype. + + // Don't make duplicate links. + seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + // only use the first link + linkPathFn := lbs.linkPathFns[0] + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { + return err + } + } + + return nil +} + +type linkedBlobStatter struct { + *blobStore + repository distribution.Repository + + // linkPathFns specifies one or more path functions allowing one to + // control the repository blob link set to which the blob store + // dispatches. 
This is required because manifest and layer blobs have not + // yet been fully merged. At some point, this functionality should be + // removed an the blob links folder should be merged. The first entry is + // treated as the "canonical" link location and will be used for writes. + linkPathFns []linkPathFunc +} + +var _ distribution.BlobDescriptorService = &linkedBlobStatter{} + +func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + var ( + found bool + target digest.Digest + ) + + // try the many link path functions until we get success or an error that + // is not PathNotFoundError. + for _, linkPathFn := range lbs.linkPathFns { + var err error + target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) + + if err == nil { + found = true + break // success! + } + + switch err := err.(type) { + case driver.PathNotFoundError: + // do nothing, just move to the next linkPathFn + default: + return distribution.Descriptor{}, err + } + } + + if !found { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + if target != dgst { + // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. + context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + } + + // TODO(stevvooe): Look up repository local mediatype and replace that on + // the returned descriptor. + + return lbs.blobStore.statter.Stat(ctx, target) +} + +func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { + // clear any possible existence of a link described in linkPathFns + for _, linkPathFn := range lbs.linkPathFns { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return err + } + + err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + continue // just ignore this error and continue + default: + return err + } + } + } + + return nil +} + +// resolveTargetWithFunc allows us to read a link to a resource with different +// linkPathFuncs to let us try a few different paths before returning not +// found. +func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) + if err != nil { + return "", err + } + + return lbs.blobStore.readlink(ctx, blobLinkPath) +} + +func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + // The canonical descriptor for a blob is set at the commit phase of upload + return nil +} + +// blobLinkPath provides the path to the blob link, also known as layers. +func blobLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(layerLinkPathSpec{name: name, digest: dgst}) +} + +// manifestRevisionLinkPath provides the path to the manifest revision link. 
+func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go new file mode 100644 index 000000000..aee73b85f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go @@ -0,0 +1,92 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/opencontainers/go-digest" +) + +// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. +type manifestListHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. +func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go new file mode 100644 index 000000000..4cca5157a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go @@ -0,0 +1,141 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/opencontainers/go-digest" +) + +// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. + Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest. + Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) +} + +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification() distribution.ManifestServiceOption { + return skipLayerOption{} +} + +type skipLayerOption struct{} + +func (o skipLayerOption) Apply(m distribution.ManifestService) error { + if ms, ok := m.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifestStore") +} + +type manifestStore struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + + skipDependencyVerification bool + + schema1Handler ManifestHandler + schema2Handler ManifestHandler + manifestListHandler ManifestHandler +} + +var _ distribution.ManifestService = &manifestStore{} + +func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") + + _, err := ms.blobStore.Stat(ms.ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return false, nil + } + + return false, err + } + + return true, nil +} + +func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") + + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. 
+ + content, err := ms.blobStore.Get(ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: ms.repository.Named().Name(), + Revision: dgst, + } + } + + return nil, err + } + + var versioned manifest.Versioned + if err = json.Unmarshal(content, &versioned); err != nil { + return nil, err + } + + switch versioned.SchemaVersion { + case 1: + return ms.schema1Handler.Unmarshal(ctx, dgst, content) + case 2: + // This can be an image manifest or a manifest list + switch versioned.MediaType { + case schema2.MediaTypeManifest: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList: + return ms.manifestListHandler.Unmarshal(ctx, dgst, content) + default: + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} + } + } + + return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) +} + +func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") + + switch manifest.(type) { + case *schema1.SignedManifest: + return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *schema2.DeserializedManifest: + return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *manifestlist.DeserializedManifestList: + return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) + } + + return "", fmt.Errorf("unrecognized manifest type %T", manifest) +} + +// Delete removes the revision of the specified manifest. +func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + return ms.blobStore.Delete(ctx, dgst) +} + +func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { + err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { + err := ingester(dgst) + if err != nil { + return err + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go new file mode 100644 index 000000000..b6d9b9b56 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/paths.go @@ -0,0 +1,490 @@ +package storage + +import ( + "fmt" + "path" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + storagePathVersion = "v2" // fixed storage layout version + storagePathRoot = "/docker/registry/" // all driver paths have a prefix + + // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we though + // storage path root would configurable for all drivers through this + // package. In reality, we've found it simpler to do this on a per driver + // basis. +) + +// pathFor maps paths based on "object names" and their ids. The "object +// names" mapped by are internal to the storage system. +// +// The path layout in the storage backend is roughly as follows: +// +// /v2 +// -> repositories/ +// ->/ +// -> _manifests/ +// revisions +// -> +// -> link +// tags/ +// -> current/link +// -> index +// -> //link +// -> _layers/ +// +// -> _uploads/ +// data +// startedat +// hashstates// +// -> blob/ +// +// +// The storage backend layout is broken up into a content-addressable blob +// store and repositories. 
The content-addressable blob store holds most data +// throughout the backend, keyed by algorithm and digests of the underlying +// content. Access to the blob store is controlled through links from the +// repository to blobstore. +// +// A repository is made up of layers, manifests and tags. The layers component +// is just a directory of layers which are "linked" into a repository. A layer +// can only be accessed through a qualified repository name if it is linked in +// the repository. Uploads of layers are managed in the uploads directory, +// which is key by upload id. When all data for an upload is received, the +// data is moved into the blob store and the upload directory is deleted. +// Abandoned uploads can be garbage collected by reading the startedat file +// and removing uploads that have been active for longer than a certain time. +// +// The third component of the repository directory is the manifests store, +// which is made up of a revision store and tag store. Manifests are stored in +// the blob store and linked into the revision store. +// While the registry can save all revisions of a manifest, no relationship is +// implied as to the ordering of changes to a manifest. The tag store provides +// support for name, tag lookups of manifests, using "current/link" under a +// named tag directory. An index is maintained to support deletions of all +// revisions of a given manifest tag. +// +// We cover the path formats implemented by this path mapper below. +// +// Manifests: +// +// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ +// manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// +// manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link +// +// Tags: +// +// manifestTagsPathSpec: /v2/repositories//_manifests/tags/ +// manifestTagPathSpec: /v2/repositories//_manifests/tags// +// manifestTagCurrentPathSpec: /v2/repositories//_manifests/tags//current/link +// manifestTagIndexPathSpec: /v2/repositories//_manifests/tags//index/ +// manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// +// manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link +// +// Blobs: +// +// layerLinkPathSpec: /v2/repositories//_layers///link +// +// Uploads: +// +// uploadDataPathSpec: /v2/repositories//_uploads//data +// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat +// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// +// +// Blob Store: +// +// blobsPathSpec: /v2/blobs/ +// blobPathSpec: /v2/blobs/// +// blobDataPathSpec: /v2/blobs////data +// blobMediaTypePathSpec: /v2/blobs////data +// +// For more information on the semantic meaning of each path and their +// contents, please see the path spec documentation. +func pathFor(spec pathSpec) (string, error) { + + // Switch on the path object type and return the appropriate path. At + // first glance, one may wonder why we don't use an interface to + // accomplish this. By keep the formatting separate from the pathSpec, we + // keep separate the path generation componentized. These specs could be + // passed to a completely different mapper implementation and generate a + // different set of paths. + // + // For example, imagine migrating from one backend to the other: one could + // build a filesystem walker that converts a string path in one version, + // to an intermediate path object, than can be consumed and mapped by the + // other version. 
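To make the mapping concrete, here is a short sketch of the outputs pathFor produces for two of the specs listed above. It is illustrative only and assumes it runs inside this package (pathFor and the spec types are unexported); the repository name and digest are made-up values, and a full 64-character hex digest is required in practice for validation to succeed.

	// Illustrative, in-package sketch of pathFor outputs (values are assumptions).
	d := digest.Digest("sha256:ab12...") // a real, full-length digest in practice

	p, _ := pathFor(manifestRevisionLinkPathSpec{name: "library/ubuntu", revision: d})
	// p -> /docker/registry/v2/repositories/library/ubuntu/_manifests/revisions/sha256/ab12.../link

	p, _ = pathFor(blobDataPathSpec{digest: d})
	// p -> /docker/registry/v2/blobs/sha256/ab/ab12.../data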
+ + rootPrefix := []string{storagePathRoot, storagePathVersion} + repoPrefix := append(rootPrefix, "repositories") + + switch v := spec.(type) { + + case manifestRevisionsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil + + case manifestRevisionPathSpec: + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil + case manifestRevisionLinkPathSpec: + root, err := pathFor(manifestRevisionPathSpec{ + name: v.name, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil + case manifestTagPathSpec: + root, err := pathFor(manifestTagsPathSpec{ + name: v.name, + }) + + if err != nil { + return "", err + } + + return path.Join(root, v.tag), nil + case manifestTagCurrentPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "current", "link"), nil + case manifestTagIndexPathSpec: + root, err := pathFor(manifestTagPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "index"), nil + case manifestTagIndexEntryLinkPathSpec: + root, err := pathFor(manifestTagIndexEntryPathSpec{ + name: v.name, + tag: v.tag, + revision: v.revision, + }) + + if err != nil { + return "", err + } + + return path.Join(root, "link"), nil + case manifestTagIndexEntryPathSpec: + root, err := pathFor(manifestTagIndexPathSpec{ + name: v.name, + tag: v.tag, + }) + + if err != nil { + return "", err + } + + components, err := digestPathComponents(v.revision, false) + if err != nil { + return "", err + } + + return path.Join(root, path.Join(components...)), nil + case layerLinkPathSpec: + components, err := digestPathComponents(v.digest, false) + if err != nil { + return "", err + } + + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". + // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. + + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobsPathSpec: + blobsPathPrefix := append(rootPrefix, "blobs") + return path.Join(blobsPathPrefix...), nil + case blobPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + case blobDataPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + components = append(components, "data") + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + + case uploadDataPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil + case uploadStartedAtPathSpec: + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. 
+ } + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil + default: + // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). + return "", fmt.Errorf("unknown path spec: %#v", v) + } +} + +// pathSpec is a type to mark structs as path specs. There is no +// implementation because we'd like to keep the specs and the mappers +// decoupled. +type pathSpec interface { + pathSpec() +} + +// manifestRevisionsPathSpec describes the directory path for +// a manifest revision. +type manifestRevisionsPathSpec struct { + name string +} + +func (manifestRevisionsPathSpec) pathSpec() {} + +// manifestRevisionPathSpec describes the components of the directory path for +// a manifest revision. +type manifestRevisionPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionPathSpec) pathSpec() {} + +// manifestRevisionLinkPathSpec describes the path components required to look +// up the data link for a revision of a manifest. If this file is not present, +// the manifest blob is not available in the given repo. The contents of this +// file should just be the digest. +type manifestRevisionLinkPathSpec struct { + name string + revision digest.Digest +} + +func (manifestRevisionLinkPathSpec) pathSpec() {} + +// manifestTagsPathSpec describes the path elements required to point to the +// manifest tags directory. +type manifestTagsPathSpec struct { + name string +} + +func (manifestTagsPathSpec) pathSpec() {} + +// manifestTagPathSpec describes the path elements required to point to the +// manifest tag links files under a repository. These contain a blob id that +// can be used to look up the data and signatures. +type manifestTagPathSpec struct { + name string + tag string +} + +func (manifestTagPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the current revision for a +// given tag. +type manifestTagCurrentPathSpec struct { + name string + tag string +} + +func (manifestTagCurrentPathSpec) pathSpec() {} + +// manifestTagCurrentPathSpec describes the link to the index of revisions +// with the given tag. +type manifestTagIndexPathSpec struct { + name string + tag string +} + +func (manifestTagIndexPathSpec) pathSpec() {} + +// manifestTagIndexEntryPathSpec contains the entries of the index by revision. +type manifestTagIndexEntryPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryPathSpec) pathSpec() {} + +// manifestTagIndexEntryLinkPathSpec describes the link to a revisions of a +// manifest with given tag within the index. +type manifestTagIndexEntryLinkPathSpec struct { + name string + tag string + revision digest.Digest +} + +func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} + +// blobLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference +// into the blob store. The format of the contents is as follows: +// +// : +// +// The following example of the file contents is more illustrative: +// +// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 +// +// This indicates that there is a blob with the id/digest, calculated via +// sha256 that can be fetched from the blob store. 
+type layerLinkPathSpec struct { + name string + digest digest.Digest +} + +func (layerLinkPathSpec) pathSpec() {} + +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) + +// blobsPathSpec contains the path for the blobs directory +type blobsPathSpec struct{} + +func (blobsPathSpec) pathSpec() {} + +// blobPathSpec contains the path for the registry global blob store. +type blobPathSpec struct { + digest digest.Digest +} + +func (blobPathSpec) pathSpec() {} + +// blobDataPathSpec contains the path for the registry global blob store. For +// now, this contains layer data, exclusively. +type blobDataPathSpec struct { + digest digest.Digest +} + +func (blobDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters of the data file for +// uploads. +type uploadDataPathSpec struct { + name string + id string +} + +func (uploadDataPathSpec) pathSpec() {} + +// uploadDataPathSpec defines the path parameters for the file that stores the +// start time of an uploads. If it is missing, the upload is considered +// unknown. Admittedly, the presence of this file is an ugly hack to make sure +// we have a way to cleanup old or stalled uploads that doesn't rely on driver +// FileInfo behavior. If we come up with a more clever way to do this, we +// should remove this file immediately and rely on the startetAt field from +// the client to enforce time out policies. +type uploadStartedAtPathSpec struct { + name string + id string +} + +func (uploadStartedAtPathSpec) pathSpec() {} + +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, id, and alg. +type uploadHashStatePathSpec struct { + name string + id string + alg digest.Algorithm + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + +// repositoriesRootPathSpec returns the root of repositories +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {} + +// digestPathComponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// / +// +// If multilevel is true, the first two bytes of the digest will separate +// groups of digest folder. 
It will be as follows: +// +// // +// +func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err + } + + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) + hex := dgst.Hex() + prefix := []string{algorithm} + + var suffix []string + + if multilevel { + suffix = append(suffix, hex[:2]) + } + + suffix = append(suffix, hex) + + return append(prefix, suffix...), nil +} + +// Reconstructs a digest from a path +func digestFromPath(digestPath string) (digest.Digest, error) { + + digestPath = strings.TrimSuffix(digestPath, "/data") + dir, hex := path.Split(digestPath) + dir = path.Dir(dir) + dir, next := path.Split(dir) + + // next is either the algorithm OR the first two characters in the hex string + var algo string + if next == hex[:2] { + algo = path.Base(dir) + } else { + algo = next + } + + dgst := digest.NewDigestFromHex(algo, hex) + return dgst, dgst.Validate() +} diff --git a/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go new file mode 100644 index 000000000..925b1ae9b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -0,0 +1,139 @@ +package storage + +import ( + "path" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" +) + +// uploadData stored the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. The list of files deleted and errors +// encountered are returned +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(ctx, driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(ctx, uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +} + +// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. The only reliable way to +// classify the age of a file is with the date stored in the startedAt +// file, so gather files by UUID with a date from startedAt. 
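For orientation, a minimal sketch of how the exported PurgeUploads entry point above might be driven from maintenance code; it in turn relies on the walker defined just below. The helper name, the seven-day cutoff, and the driver value are assumptions for illustration, not anything prescribed by this file.

	// purgeStaleUploads is a hypothetical helper in package storage: it removes
	// upload directories older than seven days via PurgeUploads.
	func purgeStaleUploads(ctx context.Context, driver storageDriver.StorageDriver) {
		cutoff := time.Now().Add(-7 * 24 * time.Hour)
		deleted, errs := PurgeUploads(ctx, driver, cutoff, true)
		log.Infof("purged %d upload directories, %d errors", len(deleted), len(errs))
	}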
+func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData, 0) + + inUploadDir := false + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return ErrSkipDir + } + + } + + uuid, isContainingDir := uuidFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uuidFromPath extracts the upload UUID from a given path +// If the UUID is the last path component, this is the containing +// directory for all upload files +func uuidFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedAtFile +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + // todo:(richardscothern) - pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/registry.go b/vendor/github.com/docker/distribution/registry/storage/registry.go new file mode 100644 index 000000000..20525ffb3 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/registry.go @@ -0,0 +1,306 @@ +package storage + +import ( + "regexp" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libtrust" +) + +// registry is the top-level implementation of Registry for use in the storage +// package. All instances should descend from this object. +type registry struct { + blobStore *blobStore + blobServer *blobServer + statter *blobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool + schema1SigningKey libtrust.PrivateKey + blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory + manifestURLs manifestURLs +} + +// manifestURLs holds regular expressions for controlling manifest URL whitelisting +type manifestURLs struct { + allow *regexp.Regexp + deny *regexp.Regexp +} + +// RegistryOption is the type used for functional options for NewRegistry. 
+type RegistryOption func(*registry) error + +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).URLFor to serve all blobs. +func EnableRedirect(registry *registry) error { + registry.blobServer.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry. It enables deletion on +// the registry. +func EnableDelete(registry *registry) error { + registry.deleteEnabled = true + return nil +} + +// DisableDigestResumption is a functional option for NewRegistry. It should be +// used if the registry is acting as a caching proxy. +func DisableDigestResumption(registry *registry) error { + registry.resumableDigestEnabled = false + return nil +} + +// ManifestURLsAllowRegexp is a functional option for NewRegistry. +func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.allow = r + return nil + } +} + +// ManifestURLsDenyRegexp is a functional option for NewRegistry. +func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption { + return func(registry *registry) error { + registry.manifestURLs.deny = r + return nil + } +} + +// Schema1SigningKey returns a functional option for NewRegistry. It sets the +// key for signing all schema1 manifests. +func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { + return func(registry *registry) error { + registry.schema1SigningKey = key + return nil + } +} + +// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the +// factory to create BlobDescriptorServiceFactory middleware. +func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption { + return func(registry *registry) error { + registry.blobDescriptorServiceFactory = factory + return nil + } +} + +// BlobDescriptorCacheProvider returns a functional option for +// NewRegistry. It creates a cached blob statter for use by the +// registry. +func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { + // TODO(aaronl): The duplication of statter across several objects is + // ugly, and prevents us from using interface types in the registry + // struct. Ideally, blobStore and blobServer should be lazily + // initialized, and use the current value of + // blobDescriptorCacheProvider. + return func(registry *registry) error { + if blobDescriptorCacheProvider != nil { + statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) + registry.blobStore.statter = statter + registry.blobServer.statter = statter + registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider + } + return nil + } +} + +// NewRegistry creates a new registry instance from the provided driver. The +// resulting registry may be shared by multiple goroutines but is cheap to +// allocate. If the Redirect option is specified, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. 
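Concretely, a caller composes the functional options above when constructing the namespace. A minimal sketch follows; the storage driver, the context, and the URL pattern are assumed values for illustration only.

	// Illustrative construction of a registry namespace with functional options.
	allowed := regexp.MustCompile(`^https://mirror\.example\.com/`)

	ns, err := NewRegistry(ctx, driver,
		EnableDelete,
		EnableRedirect,
		ManifestURLsAllowRegexp(allowed),
	)
	if err != nil {
		// handle construction error
	}
	_ = ns // ns satisfies distribution.Namespace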
+func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { + // create global statter + statter := &blobStatter{ + driver: driver, + } + + bs := &blobStore{ + driver: driver, + statter: statter, + } + + registry := ®istry{ + blobStore: bs, + blobServer: &blobServer{ + driver: driver, + statter: statter, + pathFn: bs.path, + }, + statter: statter, + resumableDigestEnabled: true, + } + + for _, option := range options { + if err := option(registry); err != nil { + return nil, err + } + } + + return registry, nil +} + +// Scope returns the namespace scope for a registry. The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +// Repository returns an instance of the repository tied to the registry. +// Instances should not be shared between goroutines but are cheap to +// allocate. In general, they should be request scoped. +func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { + var descriptorCache distribution.BlobDescriptorService + if reg.blobDescriptorCacheProvider != nil { + var err error + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) + if err != nil { + return nil, err + } + } + + return &repository{ + ctx: ctx, + registry: reg, + name: canonicalName, + descriptorCache: descriptorCache, + }, nil +} + +func (reg *registry) Blobs() distribution.BlobEnumerator { + return reg.blobStore +} + +func (reg *registry) BlobStatter() distribution.BlobStatter { + return reg.statter +} + +// repository provides name-scoped access to various services. +type repository struct { + *registry + ctx context.Context + name reference.Named + descriptorCache distribution.BlobDescriptorService +} + +// Name returns the name of the repository. +func (repo *repository) Named() reference.Named { + return repo.name +} + +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + +// Manifests returns an instance of ManifestService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + manifestLinkPathFns := []linkPathFunc{ + // NOTE(stevvooe): Need to search through multiple locations since + // 2.1.0 unintentionally linked into _layers. + manifestRevisionLinkPath, + blobLinkPath, + } + + manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} + + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: statter, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. 
+ linkPathFns: manifestLinkPathFns, + linkDirectoryPathSpec: manifestDirectoryPathSpec, + } + + ms := &manifestStore{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ + ctx: ctx, + schema1SigningKey: repo.schema1SigningKey, + repository: repo, + blobStore: blobStore, + }, + schema2Handler: &schema2ManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + manifestURLs: repo.registry.manifestURLs, + }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + }, + } + + // Apply options + for _, option := range options { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + + return ms, nil +} + +// Blobs returns an instance of the BlobStore. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, + } + + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + return &linkedBlobStore{ + registry: repo.registry, + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. + linkPathFns: []linkPathFunc{blobLinkPath}, + deleteEnabled: repo.registry.deleteEnabled, + resumableDigestEnabled: repo.resumableDigestEnabled, + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go new file mode 100644 index 000000000..05c53254f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go @@ -0,0 +1,136 @@ +package storage + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/opencontainers/go-digest" +) + +var ( + errUnexpectedURL = errors.New("unexpected URL on layer") + errMissingURL = errors.New("missing URL on layer") + errInvalidURL = errors.New("invalid URL on layer") +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. 
+type schema2ManifestHandler struct { + repository distribution.Repository + blobStore distribution.BlobStore + ctx context.Context + manifestURLs manifestURLs +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. +func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if skipDependencyVerification { + return nil + } + + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + blobsService := ms.repository.Blobs(ctx) + + for _, descriptor := range mnfst.References() { + var err error + + switch descriptor.MediaType { + case schema2.MediaTypeForeignLayer: + // Clients download this layer from an external URL, so do not check for + // its presense. + if len(descriptor.URLs) == 0 { + err = errMissingURL + } + allow := ms.manifestURLs.allow + deny := ms.manifestURLs.deny + for _, u := range descriptor.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { + err = errInvalidURL + break + } + } + case schema2.MediaTypeManifest, schema1.MediaTypeManifest: + var exists bool + exists, err = manifestService.Exists(ctx, descriptor.Digest) + if err != nil || !exists { + err = distribution.ErrBlobUnknown // just coerce to unknown. + } + + fallthrough // double check the blob store. + default: + // forward all else to blob storage + if len(descriptor.URLs) == 0 { + _, err = blobsService.Stat(ctx, descriptor.Digest) + } + } + + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest}) + } + } + + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go new file mode 100644 index 000000000..6ca1c6c8c --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go @@ -0,0 +1,141 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository distribution.Repository + schema1SigningKey libtrust.PrivateKey + blobStore distribution.BlobStore + ctx context.Context +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + + var ( + signatures [][]byte + err error + ) + + jsig, err := libtrust.NewJSONSignature(content, signatures...) + if err != nil { + return nil, err + } + + if ms.schema1SigningKey != nil { + if err := jsig.Sign(ms.schema1SigningKey); err != nil { + return nil, err + } + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. 
+func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go new file mode 100644 index 000000000..d73278869 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore.go @@ -0,0 +1,191 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +var _ distribution.TagService = &tagStore{} + +// tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments +// which only makes use of the Digest field of the returned distribution.Descriptor +// but does not enable full roundtripping of Descriptor objects +type tagStore struct { + repository *repository + blobStore *blobStore +} + +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + + pathSpec, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + }) + if err != nil { + return tags, err + } + + entries, err := ts.blobStore.driver.List(ctx, pathSpec) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} + default: + return tags, err + } + } + + for _, entry := range entries { + _, filename := path.Split(entry) + tags = append(tags, filename) + } + + return tags, nil +} + +// exists returns true if the specified manifest tag exists in the repository. 
+func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { + tagPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return false, err + } + + exists, err := exists(ctx, ts.blobStore.driver, tagPath) + if err != nil { + return false, err + } + + return exists, nil +} + +// Tag tags the digest with the given tag, updating the the store to point at +// the current tag. The digest must point to a manifest. +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return err + } + + lbs := ts.linkedBlobStore(ctx, tag) + + // Link into the index + if err := lbs.linkBlob(ctx, desc); err != nil { + return err + } + + // Overwrite the current link + return ts.blobStore.link(ctx, currentPath, desc.Digest) +} + +// resolve the current revision for name and tag. +func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + currentPath, err := pathFor(manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + if err != nil { + return distribution.Descriptor{}, err + } + + revision, err := ts.blobStore.readlink(ctx, currentPath) + if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} + } + + return distribution.Descriptor{}, err + } + + return distribution.Descriptor{Digest: revision}, nil +} + +// Untag removes the tag association +func (ts *tagStore) Untag(ctx context.Context, tag string) error { + tagPath, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }) + + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.ErrTagUnknown{Tag: tag} + case nil: + break + default: + return err + } + + return ts.blobStore.driver.Delete(ctx, tagPath) +} + +// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one +// to index manifest blobs by tag name. While the tag store doesn't map +// precisely to the linked blob store, using this ensures the links are +// managed via the same code path. +func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { + return &linkedBlobStore{ + blobStore: ts.blobStore, + repository: ts.repository, + ctx: ctx, + linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ + name: name, + tag: tag, + revision: dgst, + }) + + }}, + } +} + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. 
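Callers reach this store through the repository's TagService rather than directly; a minimal usage sketch, where repo, ctx, and dgst are assumed values:

	// Illustrative use of the TagService returned by repo.Tags.
	tags := repo.Tags(ctx)

	// Point "latest" at an existing manifest digest.
	if err := tags.Tag(ctx, "latest", distribution.Descriptor{Digest: dgst}); err != nil {
		// handle error
	}

	// Resolve the tag back to its current digest.
	if desc, err := tags.Get(ctx, "latest"); err == nil {
		_ = desc.Digest
	}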
+func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/util.go b/vendor/github.com/docker/distribution/registry/storage/util.go new file mode 100644 index 000000000..773d7ba0b --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/util.go @@ -0,0 +1,21 @@ +package storage + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// Exists provides a utility method to test whether or not a path exists in +// the given driver. +func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { + if _, err := drv.Stat(ctx, path); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return false, nil + default: + return false, err + } + } + + return true, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/vacuum.go b/vendor/github.com/docker/distribution/registry/storage/vacuum.go new file mode 100644 index 000000000..42c8ef605 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/vacuum.go @@ -0,0 +1,67 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" + "github.com/opencontainers/go-digest" +) + +// vacuum contains functions for cleaning up repositories and blobs +// These functions will only reliably work on strongly consistent +// storage systems. 
+// https://en.wikipedia.org/wiki/Consistency_model + +// NewVacuum creates a new Vacuum +func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { + return Vacuum{ + ctx: ctx, + driver: driver, + } +} + +// Vacuum removes content from the filesystem +type Vacuum struct { + driver driver.StorageDriver + ctx context.Context +} + +// RemoveBlob removes a blob from the filesystem +func (v Vacuum) RemoveBlob(dgst string) error { + d, err := digest.Parse(dgst) + if err != nil { + return err + } + + blobPath, err := pathFor(blobPathSpec{digest: d}) + if err != nil { + return err + } + + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + + err = v.driver.Delete(v.ctx, blobPath) + if err != nil { + return err + } + + return nil +} + +// RemoveRepository removes a repository directory from the +// filesystem +func (v Vacuum) RemoveRepository(repoName string) error { + rootForRepository, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(rootForRepository, repoName) + context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + err = v.driver.Delete(v.ctx, repoDir) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk.go b/vendor/github.com/docker/distribution/registry/storage/walk.go new file mode 100644 index 000000000..d979796eb --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/walk.go @@ -0,0 +1,59 @@ +package storage + +import ( + "errors" + "fmt" + "sort" + + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// ErrSkipDir is used as a return value from onFileFunc to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk +// If the returned error is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. Otherwise Walk will return +type WalkFn func(fileInfo storageDriver.FileInfo) error + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) + if err != nil { + return err + } + sort.Stable(sort.StringSlice(children)) + for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck. 
+ fileInfo, err := driver.Stat(ctx, child) + if err != nil { + return err + } + err = f(fileInfo) + skipDir := (err == ErrSkipDir) + if err != nil && !skipDir { + return err + } + + if fileInfo.IsDir() && !skipDir { + if err := Walk(ctx, driver, child, f); err != nil { + return err + } + } + } + return nil +} + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go new file mode 100644 index 000000000..cc7c25955 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/builder.go @@ -0,0 +1,99 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + containerpkg "github.com/docker/docker/container" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +// Source defines a location that can be used as a source for the ADD/COPY +// instructions in the builder. +type Source interface { + // Root returns root path for accessing source + Root() string + // Close allows to signal that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Hash returns a checksum for a file + Hash(path string) (string, error) +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + ImageBackend + + // ContainerAttachRaw attaches to container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // Commit creates a new Docker image from an existing Docker container. + Commit(string, *backend.ContainerCommitConfig) (string, error) + // ContainerKill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped. + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) + // ContainerCreateWorkdir creates the workdir + ContainerCreateWorkdir(containerID string) error + + // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container + // specified by a container object. 
+ // TODO: extract in the builder instead of passing `decompress` + // TODO: use containerd/fs.changestream instead as a source + CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error +} + +// ImageBackend are the interface methods required from an image component +type ImageBackend interface { + GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error) +} + +// Result is the output produced by a Builder +type Result struct { + ImageID string + FromImage Image +} + +// ImageCacheBuilder represents a generator for stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string) ImageCache +} + +// ImageCache abstracts an image cache. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCache returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config +} + +// ReleaseableLayer is an image layer that can be mounted and released +type ReleaseableLayer interface { + Release() error + Mount() (string, error) +} diff --git a/vendor/github.com/docker/docker/cli/cli.go b/vendor/github.com/docker/docker/cli/cli.go new file mode 100644 index 000000000..428166719 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cli.go @@ -0,0 +1,25 @@ +package cli + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/homedir" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") + configFileDir = ".docker" +) + +// ConfigurationDir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable. +// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves +func ConfigurationDir() string { + return configDir +} + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go new file mode 100644 index 000000000..c7bb39c43 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cobra.go @@ -0,0 +1,150 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. 
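+//
+// A minimal usage sketch (the command name "myapp" is illustrative, and fmt
+// and os are assumed to be imported by the caller):
+//
+//	rootCmd := &cobra.Command{Use: "myapp"}
+//	SetupRootCommand(rootCmd)
+//	if err := rootCmd.Execute(); err != nil {
+//		fmt.Fprintln(os.Stderr, err)
+//		os.Exit(1)
+//	}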
+func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return nil + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return errors.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{ wrappedFlagUsages . | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. 
+{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go new file mode 100644 index 000000000..62f62433b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. +// Useful in a loop if you don't want to return the error right away and you want to display after the loop, +// all the errors that happened during the loop. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go new file mode 100644 index 000000000..d28af86be --- /dev/null +++ b/vendor/github.com/docker/docker/cli/required.go @@ -0,0 +1,96 @@ +package cli + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return errors.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} + +// RequiresMinArgs returns an error if there is not at least min args +func RequiresMinArgs(min int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min { + return nil + } + return errors.Errorf( + "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresMaxArgs returns an error if there is not at most max args +func RequiresMaxArgs(max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) <= max { + return nil + } + return errors.Errorf( + "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresRangeArgs returns an error if there is not at least min args and at most max args +func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min && len(args) <= max { + return nil + } + return errors.Errorf( + "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// ExactArgs returns an error if there is not the exact number of args +func ExactArgs(number int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) == number { + return nil + } + return errors.Errorf( + "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + number, + 
cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go new file mode 100644 index 000000000..c9dc6e96e --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ -0,0 +1,108 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/volume" +) + +// ContainerDecoder implements httputils.ContainerDecoder +// calling DecodeContainerConfig. +type ContainerDecoder struct{} + +// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + return DecodeContainerConfig(src) +} + +// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + return DecodeHostConfig(src) +} + +// DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct +// Be aware this function is not checking whether the resulted structs are nil, +// it's your business to do so +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateMountSettings(w.Config, hc); err != nil { + return nil, nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := validateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := validateIsolation(hc); err != nil { + return nil, nil, nil, err + } + + // Validate QoS + if err := validateQoS(hc); err != nil { + return nil, nil, nil, err + } + + // Validate Resources + if err := validateResources(hc, sysinfo.New(true)); err != nil { + return nil, nil, nil, err + } + + // Validate Privileged + if err := validatePrivileged(hc); err != nil { + return nil, nil, nil, err + } + + // Validate ReadonlyRootfs + if err := validateReadonlyRootfs(hc); err != nil { + return nil, nil, nil, err + } + + return w.Config, hc, w.NetworkingConfig, nil +} + +// validateMountSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateMountSettings(c *container.Config, hc *container.HostConfig) error { + // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 ) + + // Ensure all volumes and binds are valid. 
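+ // Both the Config.Volumes keys and the HostConfig.Binds entries below are
+ // checked with volume.ParseMountRaw, using the host config's VolumeDriver;
+ // the first spec that fails to parse aborts decoding with an error naming it.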
+ for spec := range c.Volumes { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 000000000..b4fbfb279 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. +// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go new file mode 100644 index 000000000..f2361b554 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. 
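+// Unlike the Unix variant above, the Windows wrapper carries no deprecated
+// top-level resource fields, so no merging is needed and the inner HostConfig
+// is returned as-is.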
+func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 000000000..c95a2919e --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,38 @@ +package runconfig + +import ( + "fmt" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. + ErrConflictHostNetwork = fmt.Errorf("container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("conflicting options: host type networking can't be used with links. 
This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("user specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("user specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("conflicting options: hostname and the UTS mode") +) diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go new file mode 100644 index 000000000..e8eede150 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -0,0 +1,80 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. +func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } +} + +// validateNetContainerMode ensures that the various combinations of requested +// network settings wrt container mode are valid. 
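+//
+// For example, a NetworkMode of "container:web" is well formed, while a bare
+// "container:" with no target is rejected, and combining a container network
+// mode with a hostname, links, DNS servers, extra hosts, a MAC address, or
+// published/exposed ports returns the corresponding ErrConflict* error from
+// errors.go. (The "web" container name is illustrative.)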
+func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("--net: invalid net mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go b/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go new file mode 100644 index 000000000..5b6e13dc9 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go @@ -0,0 +1,46 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation level in the hostconfig structure. +// This setting is currently discarded for Solaris so this is a no-op. +func validateIsolation(hc *container.HostConfig) error { + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go new file mode 100644 index 000000000..a60daa878 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -0,0 +1,110 @@ +// +build !windows,!solaris + +package runconfig + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. 
+func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + return nil +} + +// validateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" +// which is LXC container isolation +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) + } + return nil +} + +// validateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-period: Your kernel does not support cgroup rt period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("invalid --cpu-rt-runtime: Your kernel does not support cgroup rt runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-runtime: rt runtime cannot be higher than rt period") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go new file mode 100644 index 000000000..9ca93ae50 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -0,0 +1,96 @@ 
+package runconfig + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() { + return fmt.Errorf("net mode --net=container: unsupported for hyperv isolation") + } + + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// validateQoS performs platform specific validation of the Qos settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("invalid --cpu-rt-period: Windows does not support this feature") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("invalid --cpu-rt-runtime: Windows does not support this feature") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Privileged { + return fmt.Errorf("invalid --privileged: Windows does not support this feature") + } + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.ReadonlyRootfs { + return fmt.Errorf("invalid --read-only: Windows does not support this feature") + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go deleted file mode 100644 index 1fc8ae8d7..000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ /dev/null @@ -1,829 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. -It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. - -This package produces a different output than the standard "encoding/json" package, -which does not operate correctly on protocol buffers. -*/ -package jsonpb - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" -) - -// Marshaler is a configurable object for converting between -// protocol buffer objects and a JSON representation for them. -type Marshaler struct { - // Whether to render enum values as integers, as opposed to string values. - EnumsAsInts bool - - // Whether to render fields with zero values. - EmitDefaults bool - - // A string to indent each level by. The presence of this field will - // also cause a space to appear between the field separator and - // value, and for newlines to be appear between fields and array - // elements. - Indent string - - // Whether to use the original (.proto) name for fields. - OrigName bool -} - -// Marshal marshals a protocol buffer into JSON. -func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { - writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "", "") -} - -// MarshalToString converts a protocol buffer object to JSON string. -func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { - var buf bytes.Buffer - if err := m.Marshal(&buf, pb); err != nil { - return "", err - } - return buf.String(), nil -} - -type int32Slice []int32 - -// For sorting extensions ids to ensure stable output. -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type wkt interface { - XXX_WellKnownType() string -} - -// marshalObject writes a struct to the Writer. 
-func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { - s := reflect.ValueOf(v).Elem() - - // Handle well-known types. - if wkt, ok := v.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, ..." - sprop := proto.GetProperties(s.Type()) - return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) - case "Any": - // Any is a bit more involved. - return m.marshalAny(out, v, indent) - case "Duration": - // "Generated output always contains 3, 6, or 9 fractional digits, - // depending on required precision." - s, ns := s.Field(0).Int(), s.Field(1).Int() - d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond - x := fmt.Sprintf("%.9f", d.Seconds()) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - out.write(`"`) - out.write(x) - out.write(`s"`) - return out.err - case "Struct": - // Let marshalValue handle the `fields` map. - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 3, 6 or 9 fractional digits." - s, ns := s.Field(0).Int(), s.Field(1).Int() - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - out.write(`"`) - out.write(x) - out.write(`Z"`) - return out.err - case "Value": - // Value has a single oneof. - kind := s.Field(0) - if kind.IsNil() { - // "absence of any variant indicates an error" - return errors.New("nil Value") - } - // oneof -> *T -> T -> T.F - x := kind.Elem().Elem().Field(0) - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, x, indent) - } - } - - out.write("{") - if m.Indent != "" { - out.write("\n") - } - - firstField := true - - if typeURL != "" { - if err := m.marshalTypeURL(out, indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < s.NumField(); i++ { - value := s.Field(i) - valueField := s.Type().Field(i) - if strings.HasPrefix(valueField.Name, "XXX_") { - continue - } - - // IsNil will panic on most value kinds. - switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - if value.IsNil() { - continue - } - } - - if !m.EmitDefaults { - switch value.Kind() { - case reflect.Bool: - if !value.Bool() { - continue - } - case reflect.Int32, reflect.Int64: - if value.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if value.Uint() == 0 { - continue - } - case reflect.Float32, reflect.Float64: - if value.Float() == 0 { - continue - } - case reflect.String: - if value.Len() == 0 { - continue - } - } - } - - // Oneof fields need special handling. - if valueField.Tag.Get("protobuf_oneof") != "" { - // value is an interface containing &T{real_value}. 
- sv := value.Elem().Elem() // interface -> *T -> T - value = sv.Field(0) - valueField = sv.Type().Field(0) - } - prop := jsonProperties(valueField, m.OrigName) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, prop, value, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if ep, ok := v.(proto.Message); ok { - extensions := proto.RegisteredExtensions(v) - // Sort extensions for stable output. - ids := make([]int32, 0, len(extensions)) - for id, desc := range extensions { - if !proto.HasExtension(ep, desc) { - continue - } - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - for _, id := range ids { - desc := extensions[id] - if desc == nil { - // unknown extension - continue - } - ext, extErr := proto.GetExtension(ep, desc) - if extErr != nil { - return extErr - } - value := reflect.ValueOf(ext) - var prop proto.Properties - prop.Parse(desc.Tag) - prop.JSONName = fmt.Sprintf("[%s]", desc.Name) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, &prop, value, indent); err != nil { - return err - } - firstField = false - } - - } - - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err -} - -func (m *Marshaler) writeSep(out *errWriter) { - if m.Indent != "" { - out.write(",\n") - } else { - out.write(",") - } -} - -func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." - v := reflect.ValueOf(any).Elem() - turl := v.Field(0).String() - val := v.Field(1).Bytes() - - // Only the part of type_url after the last slash is relevant. - mname := turl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] - } - mt := proto.MessageType(mname) - if mt == nil { - return fmt.Errorf("unknown message type %q", mname) - } - msg := reflect.New(mt.Elem()).Interface().(proto.Message) - if err := proto.Unmarshal(val, msg); err != nil { - return err - } - - if _, ok := msg.(wkt); ok { - out.write("{") - if m.Indent != "" { - out.write("\n") - } - if err := m.marshalTypeURL(out, indent, turl); err != nil { - return err - } - m.writeSep(out) - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - out.write(`"value": `) - } else { - out.write(`"value":`) - } - if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { - return err - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err - } - - return m.marshalObject(out, msg, indent, turl) -} - -func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"@type":`) - if m.Indent != "" { - out.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - out.write(string(b)) - return out.err -} - -// marshalField writes field description and value to the Writer. 
-func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"`) - out.write(prop.JSONName) - out.write(`":`) - if m.Indent != "" { - out.write(" ") - } - if err := m.marshalValue(out, prop, v, indent); err != nil { - return err - } - return nil -} - -// marshalValue writes the value to the Writer. -func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - - var err error - v = reflect.Indirect(v) - - // Handle repeated elements. - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - out.write("[") - comma := "" - for i := 0; i < v.Len(); i++ { - sliceVal := v.Index(i) - out.write(comma) - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { - return err - } - comma = "," - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write("]") - return out.err - } - - // Handle well-known types. - // Most are handled up in marshalObject (because 99% are messages). - type wkt interface { - XXX_WellKnownType() string - } - if wkt, ok := v.Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "NullValue": - out.write("null") - return out.err - } - } - - // Handle enumerations. - if !m.EnumsAsInts && prop.Enum != "" { - // Unknown enum values will are stringified by the proto library as their - // value. Such values should _not_ be quoted or they will be interpreted - // as an enum string instead of their value. - enumStr := v.Interface().(fmt.Stringer).String() - var valStr string - if v.Kind() == reflect.Ptr { - valStr = strconv.Itoa(int(v.Elem().Int())) - } else { - valStr = strconv.Itoa(int(v.Int())) - } - isKnownEnum := enumStr != valStr - if isKnownEnum { - out.write(`"`) - } - out.write(enumStr) - if isKnownEnum { - out.write(`"`) - } - return out.err - } - - // Handle nested messages. - if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") - } - - // Handle maps. - // Since Go randomizes map iteration, we sort keys for stable output. - if v.Kind() == reflect.Map { - out.write(`{`) - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for i, k := range keys { - if i > 0 { - out.write(`,`) - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - - b, err := json.Marshal(k.Interface()) - if err != nil { - return err - } - s := string(b) - - // If the JSON is not a string value, encode it again to make it one. - if !strings.HasPrefix(s, `"`) { - b, err := json.Marshal(s) - if err != nil { - return err - } - s = string(b) - } - - out.write(s) - out.write(`:`) - if m.Indent != "" { - out.write(` `) - } - - if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { - return err - } - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write(`}`) - return out.err - } - - // Default handling defers to the encoding/json library. 
- b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) - if needToQuote { - out.write(`"`) - } - out.write(string(b)) - if needToQuote { - out.write(`"`) - } - return out.err -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. - AllowUnknownFields bool -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := dec.Decode(&inputValue); err != nil { - return err - } - return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) -} - -// UnmarshalString will populate the fields of a protocol buffer based -// on a JSON string. This function is lenient and will decode any options -// permutations of the related Marshaler. -func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) -} - -// unmarshalValue converts/copies a value into the target. -// prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { - targetType := target.Type() - - // Allocate memory for pointer fields. - if targetType.Kind() == reflect.Ptr { - target.Set(reflect.New(targetType.Elem())) - return u.unmarshalValue(target.Elem(), inputValue, prop) - } - - // Handle well-known types. - type wkt interface { - XXX_WellKnownType() string - } - if wkt, ok := target.Addr().Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, except that null is allowed." - // encoding/json will turn JSON `null` into Go `nil`, - // so we don't have to do any extra work. 
- return u.unmarshalValue(target.Field(0), inputValue, prop) - case "Any": - return fmt.Errorf("unmarshaling Any not supported yet") - case "Duration": - unq, err := strconv.Unquote(string(inputValue)) - if err != nil { - return err - } - d, err := time.ParseDuration(unq) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - target.Field(0).SetInt(s) - target.Field(1).SetInt(ns) - return nil - case "Timestamp": - unq, err := strconv.Unquote(string(inputValue)) - if err != nil { - return err - } - t, err := time.Parse(time.RFC3339Nano, unq) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - target.Field(0).SetInt(int64(t.Unix())) - target.Field(1).SetInt(int64(t.Nanosecond())) - return nil - } - } - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. - // The case of an enum appearing as a number is handled - // at the bottom of this function. - if inputValue[0] == '"' && prop != nil && prop.Enum != "" { - vmap := proto.EnumValueMap(prop.Enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. - s := inputValue[1 : len(inputValue)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) - } - if target.Kind() == reflect.Ptr { // proto2 - target.Set(reflect.New(targetType.Elem())) - target = target.Elem() - } - target.SetInt(int64(n)) - return nil - } - - // Handle nested messages. - if targetType.Kind() == reflect.Struct { - var jsonFields map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { - // Be liberal in what names we accept; both orig_name and camelName are okay. - fieldNames := acceptedJSONFieldNames(prop) - - vOrig, okOrig := jsonFields[fieldNames.orig] - vCamel, okCamel := jsonFields[fieldNames.camel] - if !okOrig && !okCamel { - return nil, false - } - // If, for some reason, both are present in the data, favour the camelName. - var raw json.RawMessage - if okOrig { - raw = vOrig - delete(jsonFields, fieldNames.orig) - } - if okCamel { - raw = vCamel - delete(jsonFields, fieldNames.camel) - } - return raw, true - } - - sprops := proto.GetProperties(targetType) - for i := 0; i < target.NumField(); i++ { - ft := target.Type().Field(i) - if strings.HasPrefix(ft.Name, "XXX_") { - continue - } - - valueForField, ok := consumeField(sprops.Prop[i]) - if !ok { - continue - } - - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { - return err - } - } - // Check for any oneof fields. - if len(jsonFields) > 0 { - for _, oop := range sprops.OneofTypes { - raw, ok := consumeField(oop.Prop) - if !ok { - continue - } - nv := reflect.New(oop.Type.Elem()) - target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { - return err - } - } - } - if !u.AllowUnknownFields && len(jsonFields) > 0 { - // Pick any field to be the scapegoat. 
- var f string - for fname := range jsonFields { - f = fname - break - } - return fmt.Errorf("unknown field %q in %v", f, targetType) - } - return nil - } - - // Handle arrays (which aren't encoded bytes) - if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { - var slc []json.RawMessage - if err := json.Unmarshal(inputValue, &slc); err != nil { - return err - } - len := len(slc) - target.Set(reflect.MakeSlice(targetType, len, len)) - for i := 0; i < len; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err - } - } - return nil - } - - // Handle maps (whose keys are always strings) - if targetType.Kind() == reflect.Map { - var mp map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &mp); err != nil { - return err - } - target.Set(reflect.MakeMap(targetType)) - var keyprop, valprop *proto.Properties - if prop != nil { - // These could still be nil if the protobuf metadata is broken somehow. - // TODO: This won't work because the fields are unexported. - // We should probably just reparse them. - //keyprop, valprop = prop.mkeyprop, prop.mvalprop - } - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { - return err - } - } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { - return err - } - target.SetMapIndex(k, v) - } - return nil - } - - // 64-bit integers can be encoded as strings. In this case we drop - // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 - if isNum && strings.HasPrefix(string(inputValue), `"`) { - inputValue = inputValue[1 : len(inputValue)-1] - } - - // Use the encoding/json for parsing other value types. - return json.Unmarshal(inputValue, target.Addr().Interface()) -} - -// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. -func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { - var prop proto.Properties - prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) - if origName || prop.JSONName == "" { - prop.JSONName = prop.OrigName - } - return &prop -} - -type fieldNames struct { - orig, camel string -} - -func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { - opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} - if prop.JSONName != "" { - opts.camel = prop.JSONName - } - return opts -} - -// Writer wrapper inspired by https://blog.golang.org/errors-are-values -type errWriter struct { - writer io.Writer - err error -} - -func (w *errWriter) write(str string) { - if w.err != nil { - return - } - _, w.err = w.writer.Write([]byte(str)) -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. 
-type mapKeys []reflect.Value - -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..89e07ae19 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,136 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. 
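+//
+// A minimal round-trip sketch (msg is any generated proto.Message value and
+// mypb.MyMessage is a hypothetical concrete type):
+//
+//	a, err := MarshalAny(msg)
+//	if err != nil {
+//		return err
+//	}
+//	out := &mypb.MyMessage{}
+//	if err := UnmarshalAny(a, out); err != nil {
+//		return err
+//	}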
+func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..c0d595da7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..65cb0f8eb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
+func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..1b3657622 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. 
+ +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go new file mode 100644 index 000000000..773eb279d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -0,0 +1,832 @@ +// Package client implements a now-deprecated client for InfluxDB; +// use github.com/influxdata/influxdb/client/v2 instead. 
+package client // import "github.com/influxdata/influxdb/client" + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +const ( + // DefaultHost is the default host used to connect to an InfluxDB instance + DefaultHost = "localhost" + + // DefaultPort is the default port used to connect to an InfluxDB instance + DefaultPort = 8086 + + // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance + DefaultTimeout = 0 +) + +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string + + // Chunked tells the server to send back chunked responses. This places + // less load on the server by sending back chunks of the response rather + // than waiting for the entire response all at once. + Chunked bool + + // ChunkSize sets the maximum number of rows that will be returned per + // chunk. Chunks are either divided based on their series or if they hit + // the chunk size limit. + // + // Chunked must be set to true for this option to be used. + ChunkSize int +} + +// ParseConnectionString will parse a string to create a valid connection URL +func ParseConnectionString(path string, ssl bool) (url.URL, error) { + var host string + var port int + + h, p, err := net.SplitHostPort(path) + if err != nil { + if path == "" { + host = DefaultHost + } else { + host = path + } + // If they didn't specify a port, always use the default port + port = DefaultPort + } else { + host = h + port, err = strconv.Atoi(p) + if err != nil { + return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) + } + } + + u := url.URL{ + Scheme: "http", + } + if ssl { + u.Scheme = "https" + } + + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + + return u, nil +} + +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. +// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + UnixSocket string + Username string + Password string + UserAgent string + Timeout time.Duration + Precision string + WriteConsistency string + UnsafeSsl bool +} + +// NewConfig will create a config to be used in connecting to the client +func NewConfig() Config { + return Config{ + Timeout: DefaultTimeout, + } +} + +// Client is used to make calls to the server. +type Client struct { + url url.URL + unixSocket string + username string + password string + httpClient *http.Client + userAgent string + precision string +} + +const ( + // ConsistencyOne requires at least one data node acknowledged a write. + ConsistencyOne = "one" + + // ConsistencyAll requires all data nodes to acknowledge a write. + ConsistencyAll = "all" + + // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyQuorum = "quorum" + + // ConsistencyAny allows for hinted hand off, potentially no write happened yet. + ConsistencyAny = "any" +) + +// NewClient will instantiate and return a connected client to issue commands to the server. 
+func NewClient(c Config) (*Client, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.UnsafeSsl, + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + + if c.UnixSocket != "" { + // No need for compression in local communications. + tr.DisableCompression = true + + tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", c.UnixSocket) + } + } + + client := Client{ + url: c.URL, + unixSocket: c.UnixSocket, + username: c.Username, + password: c.Password, + httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, + userAgent: c.UserAgent, + precision: c.Precision, + } + if client.userAgent == "" { + client.userAgent = "InfluxDBClient" + } + return &client, nil +} + +// SetAuth will update the username and passwords +func (c *Client) SetAuth(u, p string) { + c.username = u + c.password = p +} + +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + +// Query sends a command to the server and returns the Response +func (c *Client) Query(q Query) (*Response, error) { + u := c.url + + u.Path = "query" + values := u.Query() + values.Set("q", q.Command) + values.Set("db", q.Database) + if q.Chunked { + values.Set("chunked", "true") + if q.ChunkSize > 0 { + values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + if c.precision != "" { + values.Set("epoch", c.precision) + } + u.RawQuery = values.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != nil { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&response); err != nil { + // Ignore EOF errors if we got an invalid status code. + if !(err == io.EOF && resp.StatusCode != http.StatusOK) { + return nil, err + } + } + } + + // If we don't have an error in our json response, and didn't get StatusOK, + // then send back an error. + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// Write takes BatchPoints and allows for writing of multiple points with defaults +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
+func (c *Client) Write(bp BatchPoints) (*Response, error) { + u := c.url + u.Path = "write" + + var b bytes.Buffer + for _, p := range bp.Points { + err := checkPointTypes(p) + if err != nil { + return nil, err + } + if p.Raw != "" { + if _, err := b.WriteString(p.Raw); err != nil { + return nil, err + } + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + + if _, err := b.WriteString(p.MarshalString()); err != nil { + return nil, err + } + } + + if err := b.WriteByte('\n'); err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + precision := bp.Precision + if precision == "" { + precision = c.precision + } + + params := req.URL.Query() + params.Set("db", bp.Database) + params.Set("rp", bp.RetentionPolicy) + params.Set("precision", precision) + params.Set("consistency", bp.WriteConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// WriteLineProtocol takes a string with line returns to delimit each write +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. +func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { + u := c.url + u.Path = "write" + + r := strings.NewReader(data) + + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", database) + params.Set("rp", retentionPolicy) + params.Set("precision", precision) + params.Set("consistency", writeConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + err := fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// Ping will check to see if the server is up +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
+func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Structs + +// Message represents a user message. +type Message struct { + Level string `json:"level,omitempty"` + Text string `json:"text,omitempty"` +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err error +} + +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Series = r.Series + o.Messages = r.Messages + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Series = o.Series + r.Messages = o.Messages + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error +} + +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, result := range r.Results { + if result.Err != nil { + return result.Err + } + } + return nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. 
+type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + r.buf.Reset() + return &response, nil +} + +// Point defines the fields that will be written to the database +// Measurement, Time, and Fields are required +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type Point struct { + Measurement string + Tags map[string]string + Time time.Time + Fields map[string]interface{} + Precision string + Raw string +} + +// MarshalJSON will format the time in RFC3339Nano +// Precision is also ignored as it is only used for writing, not reading +// Or another way to say it is we always send back in nanosecond precision +func (p *Point) MarshalJSON() ([]byte, error) { + point := struct { + Measurement string `json:"measurement,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time string `json:"time,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + Precision string `json:"precision,omitempty"` + }{ + Measurement: p.Measurement, + Tags: p.Tags, + Fields: p.Fields, + Precision: p.Precision, + } + // Let it omit empty if it's really zero + if !p.Time.IsZero() { + point.Time = p.Time.UTC().Format(time.RFC3339Nano) + } + return json.Marshal(&point) +} + +// MarshalString renders string representation of a Point with specified +// precision. The default precision is nanoseconds. +func (p *Point) MarshalString() string { + pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) + if err != nil { + return "# ERROR: " + err.Error() + " " + p.Measurement + } + if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { + return pt.String() + } + return pt.PrecisionString(p.Precision) +} + +// UnmarshalJSON decodes the data into the Point struct +func (p *Point) UnmarshalJSON(b []byte) error { + var normal struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + var epoch struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + + if err := func() error { + var err error + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err = dec.Decode(&epoch); err != nil { + return err + } + // Convert from epoch to time.Time, but only if Time + // was actually set. 
+ var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + p.Measurement = epoch.Measurement + p.Tags = epoch.Tags + p.Time = ts + p.Precision = epoch.Precision + p.Fields = normalizeFields(epoch.Fields) + return nil + }(); err == nil { + return nil + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err := dec.Decode(&normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + p.Measurement = normal.Measurement + p.Tags = normal.Tags + p.Time = normal.Time + p.Precision = normal.Precision + p.Fields = normalizeFields(normal.Fields) + + return nil +} + +// Remove any notion of json.Number +func normalizeFields(fields map[string]interface{}) map[string]interface{} { + newFields := map[string]interface{}{} + + for k, v := range fields { + switch v := v.(type) { + case json.Number: + jv, e := v.Float64() + if e != nil { + panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) + } + newFields[k] = jv + default: + newFields[k] = v + } + } + return newFields +} + +// BatchPoints is used to send batched data in a single write. +// Database and Points are required +// If no retention policy is specified, it will use the databases default retention policy. +// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. +// If time is specified, it will be applied to any point with an empty time. +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type BatchPoints struct { + Points []Point `json:"points,omitempty"` + Database string `json:"database,omitempty"` + RetentionPolicy string `json:"retentionPolicy,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time time.Time `json:"time,omitempty"` + Precision string `json:"precision,omitempty"` + WriteConsistency string `json:"-"` +} + +// UnmarshalJSON decodes the data into the BatchPoints struct +func (bp *BatchPoints) UnmarshalJSON(b []byte) error { + var normal struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + } + var epoch struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + } + + if err := func() error { + var err error + if err = json.Unmarshal(b, &epoch); err != nil { + return err + } + // Convert from epoch to time.Time + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + bp.Points = epoch.Points + bp.Database = epoch.Database + bp.RetentionPolicy = epoch.RetentionPolicy + bp.Tags = epoch.Tags + bp.Time = ts + bp.Precision = epoch.Precision + return nil + }(); err == nil { + return nil + } + + if err := json.Unmarshal(b, &normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + bp.Points = normal.Points + bp.Database = normal.Database + bp.RetentionPolicy = normal.RetentionPolicy + bp.Tags = normal.Tags + bp.Time = normal.Time + bp.Precision = normal.Precision + + return nil +} + +// utility functions + +// Addr provides the current url as a string of 
the server the client is connected to. +func (c *Client) Addr() string { + if c.unixSocket != "" { + return c.unixSocket + } + return c.url.String() +} + +// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. +func checkPointTypes(p Point) error { + for _, v := range p.Fields { + switch v.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil: + return nil + default: + return fmt.Errorf("unsupported point type: %T", v) + } + } + return nil +} + +// helper functions + +// EpochToTime takes a unix epoch time and uses precision to return back a time.Time +func EpochToTime(epoch int64, precision string) (time.Time, error) { + if precision == "" { + precision = "s" + } + var t time.Time + switch precision { + case "h": + t = time.Unix(0, epoch*int64(time.Hour)) + case "m": + t = time.Unix(0, epoch*int64(time.Minute)) + case "s": + t = time.Unix(0, epoch*int64(time.Second)) + case "ms": + t = time.Unix(0, epoch*int64(time.Millisecond)) + case "u": + t = time.Unix(0, epoch*int64(time.Microsecond)) + case "n": + t = time.Unix(0, epoch) + default: + return time.Time{}, fmt.Errorf("Unknown precision %q", precision) + } + return t, nil +} + +// SetPrecision will round a time to the specified precision +func SetPrecision(t time.Time, precision string) time.Time { + switch precision { + case "n": + case "u": + return t.Round(time.Microsecond) + case "ms": + return t.Round(time.Millisecond) + case "s": + return t.Round(time.Second) + case "m": + return t.Round(time.Minute) + case "h": + return t.Round(time.Hour) + } + return t +} diff --git a/vendor/github.com/mesosphere/mesos-dns/records/state/state.go b/vendor/github.com/mesosphere/mesos-dns/records/state/state.go index e3ca70f49..c15536b01 100644 --- a/vendor/github.com/mesosphere/mesos-dns/records/state/state.go +++ b/vendor/github.com/mesosphere/mesos-dns/records/state/state.go @@ -50,7 +50,7 @@ type Status struct { State string `json:"state"` Labels []Label `json:"labels,omitempty"` ContainerStatus ContainerStatus `json:"container_status,omitempty"` - Healthy *bool `json:"healthy"` + Healthy *bool `json:"healthy"` } // ContainerStatus holds container metadata as defined in the /state.json @@ -253,8 +253,8 @@ type DiscoveryInfo struct { Location string `json:"location,omitempty"` Environment string `json:"environment,omitempty"` Labels struct { - Labels []Label `json:"labels"` - } `json:"labels"` + Labels []Label `json:"labels"` + } `json:"labels"` Ports Ports `json:"ports"` } diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 6ec5c3335..30a9957c6 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -686,7 +686,11 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Compile the list of all the fields that we're going to be decoding // from all the structs. 
- fields := make(map[*reflect.StructField]reflect.Value) + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] @@ -718,14 +722,16 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) } // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) + fields = append(fields, field{fieldType, structVal.Field(i)}) } } - for fieldType, field := range fields { - fieldName := fieldType.Name + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name - tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue := field.Tag.Get(d.config.TagName) tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue @@ -760,14 +766,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Delete the key we're using from the unused map so we stop tracking delete(dataValKeysUnused, rawMapKey.Interface()) - if !field.IsValid() { + if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. - if !field.CanSet() { + if !fieldValue.CanSet() { continue } @@ -777,7 +783,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) fieldName = fmt.Sprintf("%s.%s", name, fieldName) } - if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { errors = appendErrors(errors, err) } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go new file mode 100644 index 000000000..8981b2a2f --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go @@ -0,0 +1,114 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "os" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/syndtr/gocapability/capability" +) + +const allCapabilityTypes = capability.CAPS | capability.BOUNDS | capability.AMBS + +var capabilityMap map[string]capability.Cap + +func init() { + capabilityMap = make(map[string]capability.Cap) + last := capability.CAP_LAST_CAP + // workaround for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capKey := fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String())) + capabilityMap[capKey] = cap + } +} + +func newContainerCapList(capConfig *configs.Capabilities) (*containerCapabilities, error) { + bounding := []capability.Cap{} + for _, c := range capConfig.Bounding { + v, ok := capabilityMap[c] + if !ok { + return nil, fmt.Errorf("unknown capability %q", c) + } + bounding = append(bounding, v) + } + effective := []capability.Cap{} + for _, c := range capConfig.Effective { + v, ok := capabilityMap[c] + if !ok { + return nil, fmt.Errorf("unknown capability %q", c) + } + effective = append(effective, v) + } + inheritable := []capability.Cap{} + for _, c := range capConfig.Inheritable { + v, ok := capabilityMap[c] + if !ok { + return nil, fmt.Errorf("unknown capability %q", c) + } + inheritable = append(inheritable, v) + } + permitted := []capability.Cap{} + for _, c := 
range capConfig.Permitted { + v, ok := capabilityMap[c] + if !ok { + return nil, fmt.Errorf("unknown capability %q", c) + } + permitted = append(permitted, v) + } + ambient := []capability.Cap{} + for _, c := range capConfig.Ambient { + v, ok := capabilityMap[c] + if !ok { + return nil, fmt.Errorf("unknown capability %q", c) + } + ambient = append(ambient, v) + } + pid, err := capability.NewPid(os.Getpid()) + if err != nil { + return nil, err + } + return &containerCapabilities{ + bounding: bounding, + effective: effective, + inheritable: inheritable, + permitted: permitted, + ambient: ambient, + pid: pid, + }, nil +} + +type containerCapabilities struct { + pid capability.Capabilities + bounding []capability.Cap + effective []capability.Cap + inheritable []capability.Cap + permitted []capability.Cap + ambient []capability.Cap +} + +// ApplyBoundingSet sets the capability bounding set to those specified in the whitelist. +func (c *containerCapabilities) ApplyBoundingSet() error { + c.pid.Clear(capability.BOUNDS) + c.pid.Set(capability.BOUNDS, c.bounding...) + return c.pid.Apply(capability.BOUNDS) +} + +// Apply sets all the capabilities for the current process in the config. +func (c *containerCapabilities) ApplyCaps() error { + c.pid.Clear(allCapabilityTypes) + c.pid.Set(capability.BOUNDS, c.bounding...) + c.pid.Set(capability.PERMITTED, c.permitted...) + c.pid.Set(capability.INHERITABLE, c.inheritable...) + c.pid.Set(capability.EFFECTIVE, c.effective...) + c.pid.Set(capability.AMBIENT, c.ambient...) + return c.pid.Apply(allCapabilityTypes) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go new file mode 100644 index 000000000..c7bdf1f60 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go @@ -0,0 +1,10 @@ +// +build linux,!go1.5 + +package libcontainer + +import "syscall" + +// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building +// with earlier versions +func enableSetgroups(sys *syscall.SysProcAttr) { +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console.go b/vendor/github.com/opencontainers/runc/libcontainer/console.go new file mode 100644 index 000000000..917acc702 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/console.go @@ -0,0 +1,17 @@ +package libcontainer + +import ( + "io" + "os" +) + +// Console represents a pseudo TTY. +type Console interface { + io.ReadWriteCloser + + // Path returns the filesystem path to the slave side of the pty. + Path() string + + // Fd returns the fd for the master of the pty. + File() *os.File +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go new file mode 100644 index 000000000..b7166a31f --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go @@ -0,0 +1,13 @@ +// +build freebsd + +package libcontainer + +import ( + "errors" +) + +// newConsole returns an initialized console that can be used within a container by copying bytes +// from the master side to the slave that is attached as the tty for the container's init process. 
+func newConsole() (Console, error) { + return nil, errors.New("libcontainer console is not supported on FreeBSD") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go new file mode 100644 index 000000000..5e364a88a --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/console_linux.go @@ -0,0 +1,157 @@ +package libcontainer + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func ConsoleFromFile(f *os.File) Console { + return &linuxConsole{ + master: f, + } +} + +// newConsole returns an initialized console that can be used within a container by copying bytes +// from the master side to the slave that is attached as the tty for the container's init process. +func newConsole() (Console, error) { + master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + if err := saneTerminal(master); err != nil { + return nil, err + } + console, err := ptsname(master) + if err != nil { + return nil, err + } + if err := unlockpt(master); err != nil { + return nil, err + } + return &linuxConsole{ + slavePath: console, + master: master, + }, nil +} + +// linuxConsole is a linux pseudo TTY for use within a container. +type linuxConsole struct { + master *os.File + slavePath string +} + +func (c *linuxConsole) File() *os.File { + return c.master +} + +func (c *linuxConsole) Path() string { + return c.slavePath +} + +func (c *linuxConsole) Read(b []byte) (int, error) { + return c.master.Read(b) +} + +func (c *linuxConsole) Write(b []byte) (int, error) { + return c.master.Write(b) +} + +func (c *linuxConsole) Close() error { + if m := c.master; m != nil { + return m.Close() + } + return nil +} + +// mount initializes the console inside the rootfs mounting with the specified mount label +// and applying the correct ownership of the console. +func (c *linuxConsole) mount() error { + oldMask := unix.Umask(0000) + defer unix.Umask(oldMask) + f, err := os.Create("/dev/console") + if err != nil && !os.IsExist(err) { + return err + } + if f != nil { + f.Close() + } + return unix.Mount(c.slavePath, "/dev/console", "bind", unix.MS_BIND, "") +} + +// dupStdio opens the slavePath for the console and dups the fds to the current +// processes stdio, fd 0,1,2. +func (c *linuxConsole) dupStdio() error { + slave, err := c.open(unix.O_RDWR) + if err != nil { + return err + } + fd := int(slave.Fd()) + for _, i := range []int{0, 1, 2} { + if err := unix.Dup3(fd, i, 0); err != nil { + return err + } + } + return nil +} + +// open is a clone of os.OpenFile without the O_CLOEXEC used to open the pty slave. +func (c *linuxConsole) open(flag int) (*os.File, error) { + r, e := unix.Open(c.slavePath, flag, 0) + if e != nil { + return nil, &os.PathError{ + Op: "open", + Path: c.slavePath, + Err: e, + } + } + return os.NewFile(uintptr(r), c.slavePath), nil +} + +func ioctl(fd uintptr, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. 
+func ptsname(f *os.File) (string, error) { + var n int32 + if err := ioctl(f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} + +// saneTerminal sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func saneTerminal(terminal *os.File) error { + // Go doesn't have a wrapper for any of the termios ioctls. + var termios unix.Termios + + if err := ioctl(terminal.Fd(), unix.TCGETS, uintptr(unsafe.Pointer(&termios))); err != nil { + return fmt.Errorf("ioctl(tty, tcgets): %s", err.Error()) + } + + // Set -onlcr so we don't have to deal with \r. + termios.Oflag &^= unix.ONLCR + + if err := ioctl(terminal.Fd(), unix.TCSETS, uintptr(unsafe.Pointer(&termios))); err != nil { + return fmt.Errorf("ioctl(tty, tcsets): %s", err.Error()) + } + + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go new file mode 100644 index 000000000..e5ca54599 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go @@ -0,0 +1,11 @@ +package libcontainer + +import ( + "errors" +) + +// newConsole returns an initialized console that can be used within a container by copying bytes +// from the master side to the slave that is attached as the tty for the container's init process. +func newConsole() (Console, error) { + return nil, errors.New("libcontainer console is not supported on Solaris") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go new file mode 100644 index 000000000..c61e866a5 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/console_windows.go @@ -0,0 +1,30 @@ +package libcontainer + +// newConsole returns an initialized console that can be used within a container +func newConsole() (Console, error) { + return &windowsConsole{}, nil +} + +// windowsConsole is a Windows pseudo TTY for use within a container. +type windowsConsole struct { +} + +func (c *windowsConsole) Fd() uintptr { + return 0 +} + +func (c *windowsConsole) Path() string { + return "" +} + +func (c *windowsConsole) Read(b []byte) (int, error) { + return 0, nil +} + +func (c *windowsConsole) Write(b []byte) (int, error) { + return 0, nil +} + +func (c *windowsConsole) Close() error { + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container.go b/vendor/github.com/opencontainers/runc/libcontainer/container.go new file mode 100644 index 000000000..3ddb5ec62 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/container.go @@ -0,0 +1,166 @@ +// Package libcontainer provides a native Go implementation for creating containers +// with namespaces, cgroups, capabilities, and filesystem access controls. +// It allows you to manage the lifecycle of the container performing additional operations +// after the container is created. +package libcontainer + +import ( + "os" + "time" + + "github.com/opencontainers/runc/libcontainer/configs" +) + +// Status is the status of a container. 
+type Status int + +const ( + // Created is the status that denotes the container exists but has not been run yet. + Created Status = iota + // Running is the status that denotes the container exists and is running. + Running + // Pausing is the status that denotes the container exists, it is in the process of being paused. + Pausing + // Paused is the status that denotes the container exists, but all its processes are paused. + Paused + // Stopped is the status that denotes the container does not have a created or running process. + Stopped +) + +func (s Status) String() string { + switch s { + case Created: + return "created" + case Running: + return "running" + case Pausing: + return "pausing" + case Paused: + return "paused" + case Stopped: + return "stopped" + default: + return "unknown" + } +} + +// BaseState represents the platform agnostic pieces relating to a +// running container's state +type BaseState struct { + // ID is the container ID. + ID string `json:"id"` + + // InitProcessPid is the init process id in the parent namespace. + InitProcessPid int `json:"init_process_pid"` + + // InitProcessStartTime is the init process start time in clock cycles since boot time. + InitProcessStartTime string `json:"init_process_start"` + + // Created is the unix timestamp for the creation time of the container in UTC + Created time.Time `json:"created"` + + // Config is the container's configuration. + Config configs.Config `json:"config"` +} + +// BaseContainer is a libcontainer container object. +// +// Each container is thread-safe within the same process. Since a container can +// be destroyed by a separate process, any function may return that the container +// was not found. BaseContainer includes methods that are platform agnostic. +type BaseContainer interface { + // Returns the ID of the container + ID() string + + // Returns the current status of the container. + // + // errors: + // ContainerNotExists - Container no longer exists, + // Systemerror - System error. + Status() (Status, error) + + // State returns the current container's state information. + // + // errors: + // SystemError - System error. + State() (*State, error) + + // Returns the current config of the container. + Config() configs.Config + + // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. + // + // errors: + // ContainerNotExists - Container no longer exists, + // Systemerror - System error. + // + // Some of the returned PIDs may no longer refer to processes in the Container, unless + // the Container state is PAUSED in which case every PID in the slice is valid. + Processes() ([]int, error) + + // Returns statistics for the container. + // + // errors: + // ContainerNotExists - Container no longer exists, + // Systemerror - System error. + Stats() (*Stats, error) + + // Set resources of container as configured + // + // We can use this to change resources when containers are running. + // + // errors: + // SystemError - System error. + Set(config configs.Config) error + + // Start a process inside the container. Returns error if process fails to + // start. You can track process lifecycle with passed Process structure. + // + // errors: + // ContainerNotExists - Container no longer exists, + // ConfigInvalid - config is invalid, + // ContainerPaused - Container is paused, + // SystemError - System error. + Start(process *Process) (err error) + + // Run immediately starts the process inside the container. Returns error if process + // fails to start. 
It does not block waiting for the exec fifo after start returns;
+	// instead, it opens the fifo once start returns.
+	//
+	// errors:
+	// ContainerNotExists - Container no longer exists,
+	// ConfigInvalid - config is invalid,
+	// ContainerPaused - Container is paused,
+	// SystemError - System error.
+	Run(process *Process) (err error)
+
+	// Destroys the container, if it is in a valid state, after killing any
+	// remaining running processes.
+	//
+	// Any event registrations are removed before the container is destroyed.
+	// No error is returned if the container is already destroyed.
+	//
+	// Running containers must first be stopped using Signal(..).
+	// Paused containers must first be resumed using Resume(..).
+	//
+	// errors:
+	// ContainerNotStopped - Container is still running,
+	// ContainerPaused - Container is paused,
+	// SystemError - System error.
+	Destroy() error
+
+	// Signal sends the provided signal code to the container's initial process.
+	//
+	// If all is specified the signal is sent to all processes in the container
+	// including the initial process.
+	//
+	// errors:
+	// SystemError - System error.
+	Signal(s os.Signal, all bool) error
+
+	// Exec signals the container to exec the user's process at the end of the init.
+	//
+	// errors:
+	// SystemError - System error.
+	Exec() error
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go
new file mode 100644
index 000000000..b5563d693
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go
@@ -0,0 +1,1579 @@
+// +build linux
+
+package libcontainer
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/opencontainers/runc/libcontainer/criurpc"
+	"github.com/opencontainers/runc/libcontainer/system"
+	"github.com/opencontainers/runc/libcontainer/utils"
+	"github.com/syndtr/gocapability/capability"
+	"github.com/vishvananda/netlink/nl"
+)
+
+const stdioFdCount = 3
+
+type linuxContainer struct {
+	id                   string
+	root                 string
+	config               *configs.Config
+	cgroupManager        cgroups.Manager
+	initArgs             []string
+	initProcess          parentProcess
+	initProcessStartTime string
+	criuPath             string
+	m                    sync.Mutex
+	criuVersion          int
+	state                containerState
+	created              time.Time
+}
+
+// State represents a running container's state
+type State struct {
+	BaseState
+
+	// Platform specific fields below here
+
+	// Specifies if the container was started under the rootless mode.
+	Rootless bool `json:"rootless"`
+
+	// Path to all the cgroups setup for a container. Key is cgroup subsystem name
+	// with the value as the path.
+	CgroupPaths map[string]string `json:"cgroup_paths"`
+
+	// NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
+	// with the value as the path.
+	NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`
+
+	// Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore
+	ExternalDescriptors []string `json:"external_descriptors,omitempty"`
+}
+
+// Container is a libcontainer container object.
+//
+// Each container is thread-safe within the same process. Since a container can
+// be destroyed by a separate process, any function may return that the container
+// was not found.
+type Container interface {
+	BaseContainer
+
+	// Methods below here are platform specific
+
+	// Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
+	//
+	// errors:
+	// Systemerror - System error.
+	Checkpoint(criuOpts *CriuOpts) error
+
+	// Restore restores the checkpointed container to a running state using the criu(8) utility.
+	//
+	// errors:
+	// Systemerror - System error.
+	Restore(process *Process, criuOpts *CriuOpts) error
+
+	// If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses
+	// the execution of any user processes. Asynchronously, when the container has finished being paused,
+	// the state is changed to PAUSED.
+	// If the Container state is PAUSED, do nothing.
+	//
+	// errors:
+	// ContainerNotExists - Container no longer exists,
+	// ContainerNotRunning - Container not running or created,
+	// Systemerror - System error.
+	Pause() error
+
+	// If the Container state is PAUSED, resumes the execution of any user processes in the
+	// Container before setting the Container state to RUNNING.
+	// If the Container state is RUNNING, do nothing.
+	//
+	// errors:
+	// ContainerNotExists - Container no longer exists,
+	// ContainerNotPaused - Container is not paused,
+	// Systemerror - System error.
+	Resume() error
+
+	// NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
+	//
+	// errors:
+	// Systemerror - System error.
+	NotifyOOM() (<-chan struct{}, error)
+
+	// NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level.
+	//
+	// errors:
+	// Systemerror - System error.
+ NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) +} + +// ID returns the container's unique ID +func (c *linuxContainer) ID() string { + return c.id +} + +// Config returns the container's configuration +func (c *linuxContainer) Config() configs.Config { + return *c.config +} + +func (c *linuxContainer) Status() (Status, error) { + c.m.Lock() + defer c.m.Unlock() + return c.currentStatus() +} + +func (c *linuxContainer) State() (*State, error) { + c.m.Lock() + defer c.m.Unlock() + return c.currentState() +} + +func (c *linuxContainer) Processes() ([]int, error) { + pids, err := c.cgroupManager.GetAllPids() + if err != nil { + return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups") + } + return pids, nil +} + +func (c *linuxContainer) Stats() (*Stats, error) { + var ( + err error + stats = &Stats{} + ) + if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil { + return stats, newSystemErrorWithCause(err, "getting container stats from cgroups") + } + for _, iface := range c.config.Networks { + switch iface.Type { + case "veth": + istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) + if err != nil { + return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName) + } + stats.Interfaces = append(stats.Interfaces, istats) + } + } + return stats, nil +} + +func (c *linuxContainer) Set(config configs.Config) error { + c.m.Lock() + defer c.m.Unlock() + status, err := c.currentStatus() + if err != nil { + return err + } + if status == Stopped { + return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning) + } + c.config = &config + return c.cgroupManager.Set(c.config) +} + +func (c *linuxContainer) Start(process *Process) error { + c.m.Lock() + defer c.m.Unlock() + status, err := c.currentStatus() + if err != nil { + return err + } + if status == Stopped { + if err := c.createExecFifo(); err != nil { + return err + } + } + if err := c.start(process, status == Stopped); err != nil { + if status == Stopped { + c.deleteExecFifo() + } + return err + } + return nil +} + +func (c *linuxContainer) Run(process *Process) error { + c.m.Lock() + status, err := c.currentStatus() + if err != nil { + c.m.Unlock() + return err + } + c.m.Unlock() + if err := c.Start(process); err != nil { + return err + } + if status == Stopped { + return c.exec() + } + return nil +} + +func (c *linuxContainer) Exec() error { + c.m.Lock() + defer c.m.Unlock() + return c.exec() +} + +func (c *linuxContainer) exec() error { + path := filepath.Join(c.root, execFifoFilename) + f, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return newSystemErrorWithCause(err, "open exec fifo for reading") + } + defer f.Close() + data, err := ioutil.ReadAll(f) + if err != nil { + return err + } + if len(data) > 0 { + os.Remove(path) + return nil + } + return fmt.Errorf("cannot start an already running container") +} + +func (c *linuxContainer) start(process *Process, isInit bool) error { + parent, err := c.newParentProcess(process, isInit) + if err != nil { + return newSystemErrorWithCause(err, "creating new parent process") + } + if err := parent.start(); err != nil { + // terminate the process to ensure that it properly is reaped. 
+ if err := parent.terminate(); err != nil { + logrus.Warn(err) + } + return newSystemErrorWithCause(err, "starting container process") + } + // generate a timestamp indicating when the container was started + c.created = time.Now().UTC() + if isInit { + c.state = &createdState{ + c: c, + } + state, err := c.updateState(parent) + if err != nil { + return err + } + c.initProcessStartTime = state.InitProcessStartTime + + if c.config.Hooks != nil { + s := configs.HookState{ + Version: c.config.Version, + ID: c.id, + Pid: parent.pid(), + Bundle: utils.SearchLabels(c.config.Labels, "bundle"), + } + for i, hook := range c.config.Hooks.Poststart { + if err := hook.Run(s); err != nil { + if err := parent.terminate(); err != nil { + logrus.Warn(err) + } + return newSystemErrorWithCausef(err, "running poststart hook %d", i) + } + } + } + } else { + c.state = &runningState{ + c: c, + } + } + return nil +} + +func (c *linuxContainer) Signal(s os.Signal, all bool) error { + if all { + return signalAllProcesses(c.cgroupManager, s) + } + if err := c.initProcess.signal(s); err != nil { + return newSystemErrorWithCause(err, "signaling init process") + } + return nil +} + +func (c *linuxContainer) createExecFifo() error { + rootuid, err := c.Config().HostRootUID() + if err != nil { + return err + } + rootgid, err := c.Config().HostRootGID() + if err != nil { + return err + } + + fifoName := filepath.Join(c.root, execFifoFilename) + if _, err := os.Stat(fifoName); err == nil { + return fmt.Errorf("exec fifo %s already exists", fifoName) + } + oldMask := syscall.Umask(0000) + if err := syscall.Mkfifo(fifoName, 0622); err != nil { + syscall.Umask(oldMask) + return err + } + syscall.Umask(oldMask) + if err := os.Chown(fifoName, rootuid, rootgid); err != nil { + return err + } + return nil +} + +func (c *linuxContainer) deleteExecFifo() { + fifoName := filepath.Join(c.root, execFifoFilename) + os.Remove(fifoName) +} + +func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProcess, error) { + parentPipe, childPipe, err := utils.NewSockPair("init") + if err != nil { + return nil, newSystemErrorWithCause(err, "creating new init pipe") + } + cmd, err := c.commandTemplate(p, childPipe) + if err != nil { + return nil, newSystemErrorWithCause(err, "creating new command template") + } + if !doInit { + return c.newSetnsProcess(p, cmd, parentPipe, childPipe) + } + + // We only set up rootDir if we're not doing a `runc exec`. The reason for + // this is to avoid cases where a racing, unprivileged process inside the + // container can get access to the statedir file descriptor (which would + // allow for container rootfs escape). + rootDir, err := os.Open(c.root) + if err != nil { + return nil, err + } + cmd.ExtraFiles = append(cmd.ExtraFiles, rootDir) + cmd.Env = append(cmd.Env, + fmt.Sprintf("_LIBCONTAINER_STATEDIR=%d", stdioFdCount+len(cmd.ExtraFiles)-1)) + return c.newInitProcess(p, cmd, parentPipe, childPipe, rootDir) +} + +func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.Cmd, error) { + cmd := exec.Command(c.initArgs[0], c.initArgs[1:]...) + cmd.Stdin = p.Stdin + cmd.Stdout = p.Stdout + cmd.Stderr = p.Stderr + cmd.Dir = c.config.Rootfs + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...) 
+ if p.ConsoleSocket != nil { + cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket) + cmd.Env = append(cmd.Env, + fmt.Sprintf("_LIBCONTAINER_CONSOLE=%d", stdioFdCount+len(cmd.ExtraFiles)-1), + ) + } + cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe) + cmd.Env = append(cmd.Env, + fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1), + ) + // NOTE: when running a container with no PID namespace and the parent process spawning the container is + // PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason + // even with the parent still running. + if c.config.ParentDeathSignal > 0 { + cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal) + } + return cmd, nil +} + +func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe, rootDir *os.File) (*initProcess, error) { + cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard)) + nsMaps := make(map[configs.NamespaceType]string) + for _, ns := range c.config.Namespaces { + if ns.Path != "" { + nsMaps[ns.Type] = ns.Path + } + } + _, sharePidns := nsMaps[configs.NEWPID] + data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps) + if err != nil { + return nil, err + } + return &initProcess{ + cmd: cmd, + childPipe: childPipe, + parentPipe: parentPipe, + manager: c.cgroupManager, + config: c.newInitConfig(p), + container: c, + process: p, + bootstrapData: data, + sharePidns: sharePidns, + rootDir: rootDir, + }, nil +} + +func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) { + cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns)) + state, err := c.currentState() + if err != nil { + return nil, newSystemErrorWithCause(err, "getting container's current state") + } + // for setns process, we don't have to set cloneflags as the process namespaces + // will only be set via setns syscall + data, err := c.bootstrapData(0, state.NamespacePaths) + if err != nil { + return nil, err + } + return &setnsProcess{ + cmd: cmd, + cgroupPaths: c.cgroupManager.GetPaths(), + childPipe: childPipe, + parentPipe: parentPipe, + config: c.newInitConfig(p), + process: p, + bootstrapData: data, + }, nil +} + +func (c *linuxContainer) newInitConfig(process *Process) *initConfig { + cfg := &initConfig{ + Config: c.config, + Args: process.Args, + Env: process.Env, + User: process.User, + AdditionalGroups: process.AdditionalGroups, + Cwd: process.Cwd, + Capabilities: process.Capabilities, + PassedFilesCount: len(process.ExtraFiles), + ContainerId: c.ID(), + NoNewPrivileges: c.config.NoNewPrivileges, + Rootless: c.config.Rootless, + AppArmorProfile: c.config.AppArmorProfile, + ProcessLabel: c.config.ProcessLabel, + Rlimits: c.config.Rlimits, + } + if process.NoNewPrivileges != nil { + cfg.NoNewPrivileges = *process.NoNewPrivileges + } + if process.AppArmorProfile != "" { + cfg.AppArmorProfile = process.AppArmorProfile + } + if process.Label != "" { + cfg.ProcessLabel = process.Label + } + if len(process.Rlimits) > 0 { + cfg.Rlimits = process.Rlimits + } + cfg.CreateConsole = process.ConsoleSocket != nil + return cfg +} + +func (c *linuxContainer) Destroy() error { + c.m.Lock() + defer c.m.Unlock() + return c.state.destroy() +} + +func (c *linuxContainer) Pause() error { + c.m.Lock() + defer c.m.Unlock() + status, err := c.currentStatus() + if err != nil { + return err + } + switch status { + case Running, Created: + if err := 
c.cgroupManager.Freeze(configs.Frozen); err != nil { + return err + } + return c.state.transition(&pausedState{ + c: c, + }) + } + return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning) +} + +func (c *linuxContainer) Resume() error { + c.m.Lock() + defer c.m.Unlock() + status, err := c.currentStatus() + if err != nil { + return err + } + if status != Paused { + return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused) + } + if err := c.cgroupManager.Freeze(configs.Thawed); err != nil { + return err + } + return c.state.transition(&runningState{ + c: c, + }) +} + +func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) { + // XXX(cyphar): This requires cgroups. + if c.config.Rootless { + return nil, fmt.Errorf("cannot get OOM notifications from rootless container") + } + return notifyOnOOM(c.cgroupManager.GetPaths()) +} + +func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) { + // XXX(cyphar): This requires cgroups. + if c.config.Rootless { + return nil, fmt.Errorf("cannot get memory pressure notifications from rootless container") + } + return notifyMemoryPressure(c.cgroupManager.GetPaths(), level) +} + +var criuFeatures *criurpc.CriuFeatures + +func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error { + + var t criurpc.CriuReqType + t = criurpc.CriuReqType_FEATURE_CHECK + + if err := c.checkCriuVersion("1.8"); err != nil { + // Feature checking was introduced with CRIU 1.8. + // Ignore the feature check if an older CRIU version is used + // and just act as before. + // As all automated PR testing is done using CRIU 1.7 this + // code will not be tested by automated PR testing. + return nil + } + + // make sure the features we are looking for are really not from + // some previous check + criuFeatures = nil + + req := &criurpc.CriuReq{ + Type: &t, + // Theoretically this should not be necessary but CRIU + // segfaults if Opts is empty. 
+ // Fixed in CRIU 2.12 + Opts: rpcOpts, + Features: criuFeat, + } + + err := c.criuSwrk(nil, req, criuOpts, false) + if err != nil { + logrus.Debugf("%s", err) + return fmt.Errorf("CRIU feature check failed") + } + + logrus.Debugf("Feature check says: %s", criuFeatures) + missingFeatures := false + + if *criuFeat.MemTrack && !*criuFeatures.MemTrack { + missingFeatures = true + logrus.Debugf("CRIU does not support MemTrack") + } + + if missingFeatures { + return fmt.Errorf("CRIU is missing features") + } + + return nil +} + +// checkCriuVersion checks Criu version greater than or equal to minVersion +func (c *linuxContainer) checkCriuVersion(minVersion string) error { + var x, y, z, versionReq int + + _, err := fmt.Sscanf(minVersion, "%d.%d.%d\n", &x, &y, &z) // 1.5.2 + if err != nil { + _, err = fmt.Sscanf(minVersion, "Version: %d.%d\n", &x, &y) // 1.6 + } + versionReq = x*10000 + y*100 + z + + out, err := exec.Command(c.criuPath, "-V").Output() + if err != nil { + return fmt.Errorf("Unable to execute CRIU command: %s", c.criuPath) + } + + x = 0 + y = 0 + z = 0 + if ep := strings.Index(string(out), "-"); ep >= 0 { + // criu Git version format + var version string + if sp := strings.Index(string(out), "GitID"); sp > 0 { + version = string(out)[sp:ep] + } else { + return fmt.Errorf("Unable to parse the CRIU version: %s", c.criuPath) + } + + n, err := fmt.Sscanf(string(version), "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2 + if err != nil { + n, err = fmt.Sscanf(string(version), "GitID: v%d.%d", &x, &y) // 1.6 + y++ + } else { + z++ + } + if n < 2 || err != nil { + return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err) + } + } else { + // criu release version format + n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2 + if err != nil { + n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6 + } + if n < 2 || err != nil { + return fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err) + } + } + + c.criuVersion = x*10000 + y*100 + z + + if c.criuVersion < versionReq { + return fmt.Errorf("CRIU version must be %s or higher", minVersion) + } + + return nil +} + +const descriptorsFilename = "descriptors.json" + +func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) { + mountDest := m.Destination + if strings.HasPrefix(mountDest, c.config.Rootfs) { + mountDest = mountDest[len(c.config.Rootfs):] + } + + extMnt := &criurpc.ExtMountMap{ + Key: proto.String(mountDest), + Val: proto.String(mountDest), + } + req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) +} + +func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error { + for _, path := range c.config.MaskPaths { + fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path)) + if err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + if fi.IsDir() { + continue + } + + extMnt := &criurpc.ExtMountMap{ + Key: proto.String(path), + Val: proto.String("/dev/null"), + } + req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) + } + + return nil +} + +func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error { + c.m.Lock() + defer c.m.Unlock() + + // TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has + // support for doing unprivileged dumps, but the setup of + // rootless containers might make this complicated. 
+ if c.config.Rootless { + return fmt.Errorf("cannot checkpoint a rootless container") + } + + if err := c.checkCriuVersion("1.5.2"); err != nil { + return err + } + + if criuOpts.ImagesDirectory == "" { + return fmt.Errorf("invalid directory to save checkpoint") + } + + // Since a container can be C/R'ed multiple times, + // the checkpoint directory may already exist. + if err := os.Mkdir(criuOpts.ImagesDirectory, 0755); err != nil && !os.IsExist(err) { + return err + } + + if criuOpts.WorkDirectory == "" { + criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") + } + + if err := os.Mkdir(criuOpts.WorkDirectory, 0755); err != nil && !os.IsExist(err) { + return err + } + + workDir, err := os.Open(criuOpts.WorkDirectory) + if err != nil { + return err + } + defer workDir.Close() + + imageDir, err := os.Open(criuOpts.ImagesDirectory) + if err != nil { + return err + } + defer imageDir.Close() + + rpcOpts := criurpc.CriuOpts{ + ImagesDirFd: proto.Int32(int32(imageDir.Fd())), + WorkDirFd: proto.Int32(int32(workDir.Fd())), + LogLevel: proto.Int32(4), + LogFile: proto.String("dump.log"), + Root: proto.String(c.config.Rootfs), + ManageCgroups: proto.Bool(true), + NotifyScripts: proto.Bool(true), + Pid: proto.Int32(int32(c.initProcess.pid())), + ShellJob: proto.Bool(criuOpts.ShellJob), + LeaveRunning: proto.Bool(criuOpts.LeaveRunning), + TcpEstablished: proto.Bool(criuOpts.TcpEstablished), + ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), + FileLocks: proto.Bool(criuOpts.FileLocks), + EmptyNs: proto.Uint32(criuOpts.EmptyNs), + } + + // append optional criu opts, e.g., page-server and port + if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 { + rpcOpts.Ps = &criurpc.CriuPageServerInfo{ + Address: proto.String(criuOpts.PageServer.Address), + Port: proto.Int32(criuOpts.PageServer.Port), + } + } + + //pre-dump may need parentImage param to complete iterative migration + if criuOpts.ParentImage != "" { + rpcOpts.ParentImg = proto.String(criuOpts.ParentImage) + rpcOpts.TrackMem = proto.Bool(true) + } + + // append optional manage cgroups mode + if criuOpts.ManageCgroupsMode != 0 { + if err := c.checkCriuVersion("1.7"); err != nil { + return err + } + mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) + rpcOpts.ManageCgroupsMode = &mode + } + + var t criurpc.CriuReqType + if criuOpts.PreDump { + feat := criurpc.CriuFeatures{ + MemTrack: proto.Bool(true), + } + + if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil { + return err + } + + t = criurpc.CriuReqType_PRE_DUMP + } else { + t = criurpc.CriuReqType_DUMP + } + req := &criurpc.CriuReq{ + Type: &t, + Opts: &rpcOpts, + } + + //no need to dump these information in pre-dump + if !criuOpts.PreDump { + for _, m := range c.config.Mounts { + switch m.Device { + case "bind": + c.addCriuDumpMount(req, m) + break + case "cgroup": + binds, err := getCgroupMounts(m) + if err != nil { + return err + } + for _, b := range binds { + c.addCriuDumpMount(req, b) + } + break + } + } + + if err := c.addMaskPaths(req); err != nil { + return err + } + + for _, node := range c.config.Devices { + m := &configs.Mount{Destination: node.Path, Source: node.Path} + c.addCriuDumpMount(req, m) + } + + // Write the FD info to a file in the image directory + fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors()) + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0655) + if err != nil { + return err + } + } + + err = c.criuSwrk(nil, req, 
criuOpts, false) + if err != nil { + return err + } + return nil +} + +func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) { + mountDest := m.Destination + if strings.HasPrefix(mountDest, c.config.Rootfs) { + mountDest = mountDest[len(c.config.Rootfs):] + } + + extMnt := &criurpc.ExtMountMap{ + Key: proto.String(mountDest), + Val: proto.String(m.Source), + } + req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt) +} + +func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) { + for _, iface := range c.config.Networks { + switch iface.Type { + case "veth": + veth := new(criurpc.CriuVethPair) + veth.IfOut = proto.String(iface.HostInterfaceName) + veth.IfIn = proto.String(iface.Name) + req.Opts.Veths = append(req.Opts.Veths, veth) + break + case "loopback": + break + } + } + for _, i := range criuOpts.VethPairs { + veth := new(criurpc.CriuVethPair) + veth.IfOut = proto.String(i.HostInterfaceName) + veth.IfIn = proto.String(i.ContainerInterfaceName) + req.Opts.Veths = append(req.Opts.Veths, veth) + } +} + +func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error { + c.m.Lock() + defer c.m.Unlock() + + // TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have + // support for unprivileged restore at the moment. + if c.config.Rootless { + return fmt.Errorf("cannot restore a rootless container") + } + + if err := c.checkCriuVersion("1.5.2"); err != nil { + return err + } + if criuOpts.WorkDirectory == "" { + criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work") + } + // Since a container can be C/R'ed multiple times, + // the work directory may already exist. + if err := os.Mkdir(criuOpts.WorkDirectory, 0655); err != nil && !os.IsExist(err) { + return err + } + workDir, err := os.Open(criuOpts.WorkDirectory) + if err != nil { + return err + } + defer workDir.Close() + if criuOpts.ImagesDirectory == "" { + return fmt.Errorf("invalid directory to restore checkpoint") + } + imageDir, err := os.Open(criuOpts.ImagesDirectory) + if err != nil { + return err + } + defer imageDir.Close() + // CRIU has a few requirements for a root directory: + // * it must be a mount point + // * its parent must not be overmounted + // c.config.Rootfs is bind-mounted to a temporary directory + // to satisfy these requirements. 
+ root := filepath.Join(c.root, "criu-root") + if err := os.Mkdir(root, 0755); err != nil { + return err + } + defer os.Remove(root) + root, err = filepath.EvalSymlinks(root) + if err != nil { + return err + } + err = syscall.Mount(c.config.Rootfs, root, "", syscall.MS_BIND|syscall.MS_REC, "") + if err != nil { + return err + } + defer syscall.Unmount(root, syscall.MNT_DETACH) + t := criurpc.CriuReqType_RESTORE + req := &criurpc.CriuReq{ + Type: &t, + Opts: &criurpc.CriuOpts{ + ImagesDirFd: proto.Int32(int32(imageDir.Fd())), + WorkDirFd: proto.Int32(int32(workDir.Fd())), + EvasiveDevices: proto.Bool(true), + LogLevel: proto.Int32(4), + LogFile: proto.String("restore.log"), + RstSibling: proto.Bool(true), + Root: proto.String(root), + ManageCgroups: proto.Bool(true), + NotifyScripts: proto.Bool(true), + ShellJob: proto.Bool(criuOpts.ShellJob), + ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections), + TcpEstablished: proto.Bool(criuOpts.TcpEstablished), + FileLocks: proto.Bool(criuOpts.FileLocks), + EmptyNs: proto.Uint32(criuOpts.EmptyNs), + }, + } + + for _, m := range c.config.Mounts { + switch m.Device { + case "bind": + c.addCriuRestoreMount(req, m) + break + case "cgroup": + binds, err := getCgroupMounts(m) + if err != nil { + return err + } + for _, b := range binds { + c.addCriuRestoreMount(req, b) + } + break + } + } + + if len(c.config.MaskPaths) > 0 { + m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"} + c.addCriuRestoreMount(req, m) + } + + for _, node := range c.config.Devices { + m := &configs.Mount{Destination: node.Path, Source: node.Path} + c.addCriuRestoreMount(req, m) + } + + if criuOpts.EmptyNs&syscall.CLONE_NEWNET == 0 { + c.restoreNetwork(req, criuOpts) + } + + // append optional manage cgroups mode + if criuOpts.ManageCgroupsMode != 0 { + if err := c.checkCriuVersion("1.7"); err != nil { + return err + } + mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode) + req.Opts.ManageCgroupsMode = &mode + } + + var ( + fds []string + fdJSON []byte + ) + if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil { + return err + } + + if err := json.Unmarshal(fdJSON, &fds); err != nil { + return err + } + for i := range fds { + if s := fds[i]; strings.Contains(s, "pipe:") { + inheritFd := new(criurpc.InheritFd) + inheritFd.Key = proto.String(s) + inheritFd.Fd = proto.Int32(int32(i)) + req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd) + } + } + return c.criuSwrk(process, req, criuOpts, true) +} + +func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error { + // XXX: Do we need to deal with this case? AFAIK criu still requires root. 
+ if err := c.cgroupManager.Apply(pid); err != nil { + return err + } + + if err := c.cgroupManager.Set(c.config); err != nil { + return newSystemError(err) + } + + path := fmt.Sprintf("/proc/%d/cgroup", pid) + cgroupsPaths, err := cgroups.ParseCgroupFile(path) + if err != nil { + return err + } + + for c, p := range cgroupsPaths { + cgroupRoot := &criurpc.CgroupRoot{ + Ctrl: proto.String(c), + Path: proto.String(p), + } + req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot) + } + + return nil +} + +func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool) error { + fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET|syscall.SOCK_CLOEXEC, 0) + if err != nil { + return err + } + + logPath := filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile()) + criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client") + criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server") + defer criuClient.Close() + defer criuServer.Close() + + args := []string{"swrk", "3"} + logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath) + logrus.Debugf("Using CRIU with following args: %s", args) + cmd := exec.Command(c.criuPath, args...) + if process != nil { + cmd.Stdin = process.Stdin + cmd.Stdout = process.Stdout + cmd.Stderr = process.Stderr + } + cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer) + + if err := cmd.Start(); err != nil { + return err + } + criuServer.Close() + + defer func() { + criuClient.Close() + _, err := cmd.Process.Wait() + if err != nil { + return + } + }() + + if applyCgroups { + err := c.criuApplyCgroups(cmd.Process.Pid, req) + if err != nil { + return err + } + } + + var extFds []string + if process != nil { + extFds, err = getPipeFds(cmd.Process.Pid) + if err != nil { + return err + } + } + + logrus.Debugf("Using CRIU in %s mode", req.GetType().String()) + // In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts() + // should be empty. For older CRIU versions it still will be + // available but empty. 
+ if req.GetType() != criurpc.CriuReqType_FEATURE_CHECK { + val := reflect.ValueOf(req.GetOpts()) + v := reflect.Indirect(val) + for i := 0; i < v.NumField(); i++ { + st := v.Type() + name := st.Field(i).Name + if strings.HasPrefix(name, "XXX_") { + continue + } + value := val.MethodByName("Get" + name).Call([]reflect.Value{}) + logrus.Debugf("CRIU option %s with value %v", name, value[0]) + } + } + data, err := proto.Marshal(req) + if err != nil { + return err + } + _, err = criuClient.Write(data) + if err != nil { + return err + } + + buf := make([]byte, 10*4096) + for true { + n, err := criuClient.Read(buf) + if err != nil { + return err + } + if n == 0 { + return fmt.Errorf("unexpected EOF") + } + if n == len(buf) { + return fmt.Errorf("buffer is too small") + } + + resp := new(criurpc.CriuResp) + err = proto.Unmarshal(buf[:n], resp) + if err != nil { + return err + } + if !resp.GetSuccess() { + typeString := req.GetType().String() + return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath) + } + + t := resp.GetType() + switch { + case t == criurpc.CriuReqType_FEATURE_CHECK: + logrus.Debugf("Feature check says: %s", resp) + criuFeatures = resp.GetFeatures() + break + case t == criurpc.CriuReqType_NOTIFY: + if err := c.criuNotifications(resp, process, opts, extFds); err != nil { + return err + } + t = criurpc.CriuReqType_NOTIFY + req = &criurpc.CriuReq{ + Type: &t, + NotifySuccess: proto.Bool(true), + } + data, err = proto.Marshal(req) + if err != nil { + return err + } + _, err = criuClient.Write(data) + if err != nil { + return err + } + continue + case t == criurpc.CriuReqType_RESTORE: + case t == criurpc.CriuReqType_DUMP: + break + case t == criurpc.CriuReqType_PRE_DUMP: + // In pre-dump mode CRIU is in a loop and waits for + // the final DUMP command. + // The current runc pre-dump approach, however, is + // start criu in PRE_DUMP once for a single pre-dump + // and not the whole series of pre-dump, pre-dump, ...m, dump + // If we got the message CriuReqType_PRE_DUMP it means + // CRIU was successful and we need to forcefully stop CRIU + logrus.Debugf("PRE_DUMP finished. Send close signal to CRIU service") + criuClient.Close() + // Process status won't be success, because one end of sockets is closed + _, err := cmd.Process.Wait() + if err != nil { + logrus.Debugf("After PRE_DUMP CRIU exiting failed") + return err + } + return nil + default: + return fmt.Errorf("unable to parse the response %s", resp.String()) + } + + break + } + + // cmd.Wait() waits cmd.goroutines which are used for proxying file descriptors. + // Here we want to wait only the CRIU process. 
+ st, err := cmd.Process.Wait() + if err != nil { + return err + } + if !st.Success() { + return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath) + } + return nil +} + +// block any external network activity +func lockNetwork(config *configs.Config) error { + for _, config := range config.Networks { + strategy, err := getStrategy(config.Type) + if err != nil { + return err + } + + if err := strategy.detach(config); err != nil { + return err + } + } + return nil +} + +func unlockNetwork(config *configs.Config) error { + for _, config := range config.Networks { + strategy, err := getStrategy(config.Type) + if err != nil { + return err + } + if err = strategy.attach(config); err != nil { + return err + } + } + return nil +} + +func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, opts *CriuOpts, fds []string) error { + notify := resp.GetNotify() + if notify == nil { + return fmt.Errorf("invalid response: %s", resp.String()) + } + switch { + case notify.GetScript() == "post-dump": + f, err := os.Create(filepath.Join(c.root, "checkpoint")) + if err != nil { + return err + } + f.Close() + case notify.GetScript() == "network-unlock": + if err := unlockNetwork(c.config); err != nil { + return err + } + case notify.GetScript() == "network-lock": + if err := lockNetwork(c.config); err != nil { + return err + } + case notify.GetScript() == "setup-namespaces": + if c.config.Hooks != nil { + s := configs.HookState{ + Version: c.config.Version, + ID: c.id, + Pid: int(notify.GetPid()), + Bundle: utils.SearchLabels(c.config.Labels, "bundle"), + } + for i, hook := range c.config.Hooks.Prestart { + if err := hook.Run(s); err != nil { + return newSystemErrorWithCausef(err, "running prestart hook %d", i) + } + } + } + case notify.GetScript() == "post-restore": + pid := notify.GetPid() + r, err := newRestoredProcess(int(pid), fds) + if err != nil { + return err + } + process.ops = r + if err := c.state.transition(&restoredState{ + imageDir: opts.ImagesDirectory, + c: c, + }); err != nil { + return err + } + // create a timestamp indicating when the restored checkpoint was started + c.created = time.Now().UTC() + if _, err := c.updateState(r); err != nil { + return err + } + if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil { + if !os.IsNotExist(err) { + logrus.Error(err) + } + } + } + return nil +} + +func (c *linuxContainer) updateState(process parentProcess) (*State, error) { + c.initProcess = process + state, err := c.currentState() + if err != nil { + return nil, err + } + err = c.saveState(state) + if err != nil { + return nil, err + } + return state, nil +} + +func (c *linuxContainer) saveState(s *State) error { + f, err := os.Create(filepath.Join(c.root, stateFilename)) + if err != nil { + return err + } + defer f.Close() + return utils.WriteJSON(f, s) +} + +func (c *linuxContainer) deleteState() error { + return os.Remove(filepath.Join(c.root, stateFilename)) +} + +func (c *linuxContainer) currentStatus() (Status, error) { + if err := c.refreshState(); err != nil { + return -1, err + } + return c.state.status(), nil +} + +// refreshState needs to be called to verify that the current state on the +// container is what is true. Because consumers of libcontainer can use it +// out of process we need to verify the container's status based on runtime +// information and not rely on our in process info. 
+func (c *linuxContainer) refreshState() error { + paused, err := c.isPaused() + if err != nil { + return err + } + if paused { + return c.state.transition(&pausedState{c: c}) + } + t, err := c.runType() + if err != nil { + return err + } + switch t { + case Created: + return c.state.transition(&createdState{c: c}) + case Running: + return c.state.transition(&runningState{c: c}) + } + return c.state.transition(&stoppedState{c: c}) +} + +// doesInitProcessExist checks if the init process is still the same process +// as the initial one, it could happen that the original process has exited +// and a new process has been created with the same pid, in this case, the +// container would already be stopped. +func (c *linuxContainer) doesInitProcessExist(initPid int) (bool, error) { + startTime, err := system.GetProcessStartTime(initPid) + if err != nil { + return false, newSystemErrorWithCausef(err, "getting init process %d start time", initPid) + } + if c.initProcessStartTime != startTime { + return false, nil + } + return true, nil +} + +func (c *linuxContainer) runType() (Status, error) { + if c.initProcess == nil { + return Stopped, nil + } + pid := c.initProcess.pid() + // return Running if the init process is alive + if err := syscall.Kill(pid, 0); err != nil { + if err == syscall.ESRCH { + // It means the process does not exist anymore, could happen when the + // process exited just when we call the function, we should not return + // error in this case. + return Stopped, nil + } + return Stopped, newSystemErrorWithCausef(err, "sending signal 0 to pid %d", pid) + } + // check if the process is still the original init process. + exist, err := c.doesInitProcessExist(pid) + if !exist || err != nil { + return Stopped, err + } + // We'll create exec fifo and blocking on it after container is created, + // and delete it after start container. + if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil { + return Created, nil + } + return Running, nil +} + +func (c *linuxContainer) isPaused() (bool, error) { + fcg := c.cgroupManager.GetPaths()["freezer"] + if fcg == "" { + // A container doesn't have a freezer cgroup + return false, nil + } + data, err := ioutil.ReadFile(filepath.Join(fcg, "freezer.state")) + if err != nil { + // If freezer cgroup is not mounted, the container would just be not paused. 
+ if os.IsNotExist(err) { + return false, nil + } + return false, newSystemErrorWithCause(err, "checking if container is paused") + } + return bytes.Equal(bytes.TrimSpace(data), []byte("FROZEN")), nil +} + +func (c *linuxContainer) currentState() (*State, error) { + var ( + startTime string + externalDescriptors []string + pid = -1 + ) + if c.initProcess != nil { + pid = c.initProcess.pid() + startTime, _ = c.initProcess.startTime() + externalDescriptors = c.initProcess.externalDescriptors() + } + state := &State{ + BaseState: BaseState{ + ID: c.ID(), + Config: *c.config, + InitProcessPid: pid, + InitProcessStartTime: startTime, + Created: c.created, + }, + Rootless: c.config.Rootless, + CgroupPaths: c.cgroupManager.GetPaths(), + NamespacePaths: make(map[configs.NamespaceType]string), + ExternalDescriptors: externalDescriptors, + } + if pid > 0 { + for _, ns := range c.config.Namespaces { + state.NamespacePaths[ns.Type] = ns.GetPath(pid) + } + for _, nsType := range configs.NamespaceTypes() { + if !configs.IsNamespaceSupported(nsType) { + continue + } + if _, ok := state.NamespacePaths[nsType]; !ok { + ns := configs.Namespace{Type: nsType} + state.NamespacePaths[ns.Type] = ns.GetPath(pid) + } + } + } + return state, nil +} + +// orderNamespacePaths sorts namespace paths into a list of paths that we +// can setns in order. +func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) { + paths := []string{} + order := []configs.NamespaceType{ + // The user namespace *must* be done first. + configs.NEWUSER, + configs.NEWIPC, + configs.NEWUTS, + configs.NEWNET, + configs.NEWPID, + configs.NEWNS, + } + + // Remove namespaces that we don't need to join. + var nsTypes []configs.NamespaceType + for _, ns := range order { + if c.config.Namespaces.Contains(ns) { + nsTypes = append(nsTypes, ns) + } + } + for _, nsType := range nsTypes { + if p, ok := namespaces[nsType]; ok && p != "" { + // check if the requested namespace is supported + if !configs.IsNamespaceSupported(nsType) { + return nil, newSystemError(fmt.Errorf("namespace %s is not supported", nsType)) + } + // only set to join this namespace if it exists + if _, err := os.Lstat(p); err != nil { + return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p) + } + // do not allow namespace path with comma as we use it to separate + // the namespace paths + if strings.ContainsRune(p, ',') { + return nil, newSystemError(fmt.Errorf("invalid path %s", p)) + } + paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(nsType), p)) + } + } + return paths, nil +} + +func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) { + data := bytes.NewBuffer(nil) + for _, im := range idMap { + line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size) + if _, err := data.WriteString(line); err != nil { + return nil, err + } + } + return data.Bytes(), nil +} + +// bootstrapData encodes the necessary data in netlink binary format +// as a io.Reader. +// Consumer can write the data to a bootstrap program +// such as one that uses nsenter package to bootstrap the container's +// init process correctly, i.e. with correct namespaces, uid/gid +// mapping etc. 
+func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) { + // create the netlink message + r := nl.NewNetlinkRequest(int(InitMsg), 0) + + // write cloneFlags + r.AddData(&Int32msg{ + Type: CloneFlagsAttr, + Value: uint32(cloneFlags), + }) + + // write custom namespace paths + if len(nsMaps) > 0 { + nsPaths, err := c.orderNamespacePaths(nsMaps) + if err != nil { + return nil, err + } + r.AddData(&Bytemsg{ + Type: NsPathsAttr, + Value: []byte(strings.Join(nsPaths, ",")), + }) + } + + // write namespace paths only when we are not joining an existing user ns + _, joinExistingUser := nsMaps[configs.NEWUSER] + if !joinExistingUser { + // write uid mappings + if len(c.config.UidMappings) > 0 { + b, err := encodeIDMapping(c.config.UidMappings) + if err != nil { + return nil, err + } + r.AddData(&Bytemsg{ + Type: UidmapAttr, + Value: b, + }) + } + + // write gid mappings + if len(c.config.GidMappings) > 0 { + b, err := encodeIDMapping(c.config.GidMappings) + if err != nil { + return nil, err + } + r.AddData(&Bytemsg{ + Type: GidmapAttr, + Value: b, + }) + // The following only applies if we are root. + if !c.config.Rootless { + // check if we have CAP_SETGID to setgroup properly + pid, err := capability.NewPid(os.Getpid()) + if err != nil { + return nil, err + } + if !pid.Get(capability.EFFECTIVE, capability.CAP_SETGID) { + r.AddData(&Boolmsg{ + Type: SetgroupAttr, + Value: true, + }) + } + } + } + } + + // write oom_score_adj + r.AddData(&Bytemsg{ + Type: OomScoreAdjAttr, + Value: []byte(fmt.Sprintf("%d", c.config.OomScoreAdj)), + }) + + // write rootless + r.AddData(&Boolmsg{ + Type: RootlessAttr, + Value: c.config.Rootless, + }) + + return bytes.NewReader(r.Serialize()), nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go new file mode 100644 index 000000000..bb84ff740 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go @@ -0,0 +1,20 @@ +package libcontainer + +// State represents a running container's state +type State struct { + BaseState + + // Platform specific fields below here +} + +// A libcontainer container object. +// +// Each container is thread-safe within the same process. Since a container can +// be destroyed by a separate process, any function may return that the container +// was not found. +type Container interface { + BaseContainer + + // Methods below here are platform specific + +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go new file mode 100644 index 000000000..bb84ff740 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/container_windows.go @@ -0,0 +1,20 @@ +package libcontainer + +// State represents a running container's state +type State struct { + BaseState + + // Platform specific fields below here +} + +// A libcontainer container object. +// +// Each container is thread-safe within the same process. Since a container can +// be destroyed by a separate process, any function may return that the container +// was not found. 
+type Container interface {
+	BaseContainer
+
+	// Methods below here are platform specific
+
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go
new file mode 100644
index 000000000..9d7d4dc89
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go
@@ -0,0 +1,39 @@
+// +build linux freebsd
+
+package libcontainer
+
+// cgroup restoring strategy provided by criu
+type cgMode uint32
+
+const (
+	CRIU_CG_MODE_SOFT    cgMode = 3 + iota // restore cgroup properties only if the directory was created by criu
+	CRIU_CG_MODE_FULL                      // always restore all cgroups and their properties
+	CRIU_CG_MODE_STRICT                    // restore all, requiring that they not be present in the system
+	CRIU_CG_MODE_DEFAULT                   // the same as CRIU_CG_MODE_SOFT
+)
+
+type CriuPageServerInfo struct {
+	Address string // IP address of CRIU page server
+	Port    int32  // port number of CRIU page server
+}
+
+type VethPairName struct {
+	ContainerInterfaceName string
+	HostInterfaceName      string
+}
+
+type CriuOpts struct {
+	ImagesDirectory         string             // directory for storing image files
+	WorkDirectory           string             // directory to cd and write logs/pidfiles/stats to
+	ParentImage             string             // directory for storing parent image files in pre-dump and dump
+	LeaveRunning            bool               // leave container in running state after checkpoint
+	TcpEstablished          bool               // checkpoint/restore established TCP connections
+	ExternalUnixConnections bool               // allow external unix connections
+	ShellJob                bool               // allow dumping and restoring shell jobs
+	FileLocks               bool               // handle file locks, for safety
+	PreDump                 bool               // call criu predump to perform an iterative checkpoint
+	PageServer              CriuPageServerInfo // allow dumping to a criu page server
+	VethPairs               []VethPairName     // pass the veth pairs to criu when restoring
+	ManageCgroupsMode       cgMode             // dump or restore cgroup mode
+	EmptyNs                 uint32             // don't c/r properties for namespaces in this mask
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
new file mode 100644
index 000000000..bc9207703
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
@@ -0,0 +1,6 @@
+package libcontainer
+
+// TODO Windows: This can ultimately be entirely factored out as criu is
+// a Unix concept not relevant on Windows.
+type CriuOpts struct {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/error.go b/vendor/github.com/opencontainers/runc/libcontainer/error.go
new file mode 100644
index 000000000..21a3789ba
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/error.go
@@ -0,0 +1,70 @@
+package libcontainer
+
+import "io"
+
+// ErrorCode is the API error code type.
+type ErrorCode int
+
+// API error codes.
+const ( + // Factory errors + IdInUse ErrorCode = iota + InvalidIdFormat + + // Container errors + ContainerNotExists + ContainerPaused + ContainerNotStopped + ContainerNotRunning + ContainerNotPaused + + // Process errors + NoProcessOps + + // Common errors + ConfigInvalid + ConsoleExists + SystemError +) + +func (c ErrorCode) String() string { + switch c { + case IdInUse: + return "Id already in use" + case InvalidIdFormat: + return "Invalid format" + case ContainerPaused: + return "Container paused" + case ConfigInvalid: + return "Invalid configuration" + case SystemError: + return "System error" + case ContainerNotExists: + return "Container does not exist" + case ContainerNotStopped: + return "Container is not stopped" + case ContainerNotRunning: + return "Container is not running" + case ConsoleExists: + return "Console exists for process" + case ContainerNotPaused: + return "Container is not paused" + case NoProcessOps: + return "No process operations" + default: + return "Unknown error" + } +} + +// Error is the API error type. +type Error interface { + error + + // Returns an error if it failed to write the detail of the Error to w. + // The detail of the Error may include the error message and a + // representation of the stack trace. + Detail(w io.Writer) error + + // Returns the error code for this error. + Code() ErrorCode +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory.go b/vendor/github.com/opencontainers/runc/libcontainer/factory.go new file mode 100644 index 000000000..0986cd77e --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/factory.go @@ -0,0 +1,44 @@ +package libcontainer + +import ( + "github.com/opencontainers/runc/libcontainer/configs" +) + +type Factory interface { + // Creates a new container with the given id and starts the initial process inside it. + // id must be a string containing only letters, digits and underscores and must contain + // between 1 and 1024 characters, inclusive. + // + // The id must not already be in use by an existing container. Containers created using + // a factory with the same path (and filesystem) must have distinct ids. + // + // Returns the new container with a running process. + // + // errors: + // IdInUse - id is already in use by a container + // InvalidIdFormat - id has incorrect format + // ConfigInvalid - config is invalid + // Systemerror - System error + // + // On error, any partially created container parts are cleaned up (the operation is atomic). + Create(id string, config *configs.Config) (Container, error) + + // Load takes an ID for an existing container and returns the container information + // from the state. This presents a read only view of the container. + // + // errors: + // Path does not exist + // System error + Load(id string) (Container, error) + + // StartInitialization is an internal API to libcontainer used during the reexec of the + // container. + // + // Errors: + // Pipe connection error + // System error + StartInitialization() error + + // Type returns info string about factory type (e.g. lxc, libcontainer...) 
+ Type() string +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go new file mode 100644 index 000000000..6a0f85583 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go @@ -0,0 +1,337 @@ +// +build linux + +package libcontainer + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "runtime/debug" + "strconv" + "syscall" + + "github.com/docker/docker/pkg/mount" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fs" + "github.com/opencontainers/runc/libcontainer/cgroups/rootless" + "github.com/opencontainers/runc/libcontainer/cgroups/systemd" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/configs/validate" + "github.com/opencontainers/runc/libcontainer/utils" +) + +const ( + stateFilename = "state.json" + execFifoFilename = "exec.fifo" +) + +var ( + idRegex = regexp.MustCompile(`^[\w+-\.]+$`) + maxIdLen = 1024 +) + +// InitArgs returns an options func to configure a LinuxFactory with the +// provided init binary path and arguments. +func InitArgs(args ...string) func(*LinuxFactory) error { + return func(l *LinuxFactory) (err error) { + if len(args) > 0 { + // Resolve relative paths to ensure that its available + // after directory changes. + if args[0], err = filepath.Abs(args[0]); err != nil { + return newGenericError(err, ConfigInvalid) + } + } + + l.InitArgs = args + return nil + } +} + +// SystemdCgroups is an options func to configure a LinuxFactory to return +// containers that use systemd to create and manage cgroups. +func SystemdCgroups(l *LinuxFactory) error { + l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { + return &systemd.Manager{ + Cgroups: config, + Paths: paths, + } + } + return nil +} + +// Cgroupfs is an options func to configure a LinuxFactory to return +// containers that use the native cgroups filesystem implementation to +// create and manage cgroups. +func Cgroupfs(l *LinuxFactory) error { + l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { + return &fs.Manager{ + Cgroups: config, + Paths: paths, + } + } + return nil +} + +// RootlessCgroups is an options func to configure a LinuxFactory to +// return containers that use the "rootless" cgroup manager, which will +// fail to do any operations not possible to do with an unprivileged user. +// It should only be used in conjunction with rootless containers. +func RootlessCgroups(l *LinuxFactory) error { + l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager { + return &rootless.Manager{ + Cgroups: config, + Paths: paths, + } + } + return nil +} + +// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs. +func TmpfsRoot(l *LinuxFactory) error { + mounted, err := mount.Mounted(l.Root) + if err != nil { + return err + } + if !mounted { + if err := syscall.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil { + return err + } + } + return nil +} + +// CriuPath returns an option func to configure a LinuxFactory with the +// provided criupath +func CriuPath(criupath string) func(*LinuxFactory) error { + return func(l *LinuxFactory) error { + l.CriuPath = criupath + return nil + } +} + +// New returns a linux based container factory based in the root directory and +// configures the factory with the provided option funcs. 
+func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { + if root != "" { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, newGenericError(err, SystemError) + } + } + l := &LinuxFactory{ + Root: root, + InitArgs: []string{"/proc/self/exe", "init"}, + Validator: validate.New(), + CriuPath: "criu", + } + Cgroupfs(l) + for _, opt := range options { + if err := opt(l); err != nil { + return nil, err + } + } + return l, nil +} + +// LinuxFactory implements the default factory interface for linux based systems. +type LinuxFactory struct { + // Root directory for the factory to store state. + Root string + + // InitArgs are arguments for calling the init responsibilities for spawning + // a container. + InitArgs []string + + // CriuPath is the path to the criu binary used for checkpoint and restore of + // containers. + CriuPath string + + // Validator provides validation to container configurations. + Validator validate.Validator + + // NewCgroupsManager returns an initialized cgroups manager for a single container. + NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager +} + +func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) { + if l.Root == "" { + return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) + } + if err := l.validateID(id); err != nil { + return nil, err + } + if err := l.Validator.Validate(config); err != nil { + return nil, newGenericError(err, ConfigInvalid) + } + uid, err := config.HostRootUID() + if err != nil { + return nil, newGenericError(err, SystemError) + } + gid, err := config.HostRootGID() + if err != nil { + return nil, newGenericError(err, SystemError) + } + containerRoot := filepath.Join(l.Root, id) + if _, err := os.Stat(containerRoot); err == nil { + return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse) + } else if !os.IsNotExist(err) { + return nil, newGenericError(err, SystemError) + } + if err := os.MkdirAll(containerRoot, 0711); err != nil { + return nil, newGenericError(err, SystemError) + } + if err := os.Chown(containerRoot, uid, gid); err != nil { + return nil, newGenericError(err, SystemError) + } + if config.Rootless { + RootlessCgroups(l) + } + c := &linuxContainer{ + id: id, + root: containerRoot, + config: config, + initArgs: l.InitArgs, + criuPath: l.CriuPath, + cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), + } + c.state = &stoppedState{c: c} + return c, nil +} + +func (l *LinuxFactory) Load(id string) (Container, error) { + if l.Root == "" { + return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) + } + containerRoot := filepath.Join(l.Root, id) + state, err := l.loadState(containerRoot, id) + if err != nil { + return nil, err + } + r := &nonChildProcess{ + processPid: state.InitProcessPid, + processStartTime: state.InitProcessStartTime, + fds: state.ExternalDescriptors, + } + // We have to use the RootlessManager. 
+ if state.Rootless { + RootlessCgroups(l) + } + c := &linuxContainer{ + initProcess: r, + initProcessStartTime: state.InitProcessStartTime, + id: id, + config: &state.Config, + initArgs: l.InitArgs, + criuPath: l.CriuPath, + cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), + root: containerRoot, + created: state.Created, + } + c.state = &loadedState{c: c} + if err := c.refreshState(); err != nil { + return nil, err + } + return c, nil +} + +func (l *LinuxFactory) Type() string { + return "libcontainer" +} + +// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state +// This is a low level implementation detail of the reexec and should not be consumed externally +func (l *LinuxFactory) StartInitialization() (err error) { + var ( + pipefd, rootfd int + consoleSocket *os.File + envInitPipe = os.Getenv("_LIBCONTAINER_INITPIPE") + envStateDir = os.Getenv("_LIBCONTAINER_STATEDIR") + envConsole = os.Getenv("_LIBCONTAINER_CONSOLE") + ) + + // Get the INITPIPE. + pipefd, err = strconv.Atoi(envInitPipe) + if err != nil { + return fmt.Errorf("unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s", envInitPipe, err) + } + + var ( + pipe = os.NewFile(uintptr(pipefd), "pipe") + it = initType(os.Getenv("_LIBCONTAINER_INITTYPE")) + ) + defer pipe.Close() + + // Only init processes have STATEDIR. + rootfd = -1 + if it == initStandard { + if rootfd, err = strconv.Atoi(envStateDir); err != nil { + return fmt.Errorf("unable to convert _LIBCONTAINER_STATEDIR=%s to int: %s", envStateDir, err) + } + } + + if envConsole != "" { + console, err := strconv.Atoi(envConsole) + if err != nil { + return fmt.Errorf("unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s", envConsole, err) + } + consoleSocket = os.NewFile(uintptr(console), "console-socket") + defer consoleSocket.Close() + } + + // clear the current process's environment to clean any libcontainer + // specific env vars. + os.Clearenv() + + defer func() { + // We have an error during the initialization of the container's init, + // send it back to the parent process in the form of an initError. + if werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil { + fmt.Fprintln(os.Stderr, err) + return + } + if werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil { + fmt.Fprintln(os.Stderr, err) + return + } + }() + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack())) + } + }() + + i, err := newContainerInit(it, pipe, consoleSocket, rootfd) + if err != nil { + return err + } + + // If Init succeeds, syscall.Exec will not return, hence none of the defers will be called. 
+ return i.Init() +} + +func (l *LinuxFactory) loadState(root, id string) (*State, error) { + f, err := os.Open(filepath.Join(root, stateFilename)) + if err != nil { + if os.IsNotExist(err) { + return nil, newGenericError(fmt.Errorf("container %q does not exist", id), ContainerNotExists) + } + return nil, newGenericError(err, SystemError) + } + defer f.Close() + var state *State + if err := json.NewDecoder(f).Decode(&state); err != nil { + return nil, newGenericError(err, SystemError) + } + return state, nil +} + +func (l *LinuxFactory) validateID(id string) error { + if !idRegex.MatchString(id) { + return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat) + } + if len(id) > maxIdLen { + return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat) + } + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go new file mode 100644 index 000000000..6e7de2fe7 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go @@ -0,0 +1,92 @@ +package libcontainer + +import ( + "fmt" + "io" + "text/template" + "time" + + "github.com/opencontainers/runc/libcontainer/stacktrace" +) + +var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}} +Code: {{.ECode}} +{{if .Message }} +Message: {{.Message}} +{{end}} +Frames:{{range $i, $frame := .Stack.Frames}} +--- +{{$i}}: {{$frame.Function}} +Package: {{$frame.Package}} +File: {{$frame.File}}@{{$frame.Line}}{{end}} +`)) + +func newGenericError(err error, c ErrorCode) Error { + if le, ok := err.(Error); ok { + return le + } + gerr := &genericError{ + Timestamp: time.Now(), + Err: err, + ECode: c, + Stack: stacktrace.Capture(1), + } + if err != nil { + gerr.Message = err.Error() + } + return gerr +} + +func newSystemError(err error) Error { + return createSystemError(err, "") +} + +func newSystemErrorWithCausef(err error, cause string, v ...interface{}) Error { + return createSystemError(err, fmt.Sprintf(cause, v...)) +} + +func newSystemErrorWithCause(err error, cause string) Error { + return createSystemError(err, cause) +} + +// createSystemError creates the specified error with the correct number of +// stack frames skipped. This is only to be called by the other functions for +// formatting the error. 
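// A minimal sketch of how the factory API above (New, Create, Load,
// StartInitialization) might be consumed from a host process. The state
// root "/run/example" and the container id are placeholders; Create would
// additionally need a fully populated *configs.Config (rootfs, namespaces,
// cgroups), which is elided here, and Status is assumed from the Container
// interface defined elsewhere in this package.
package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer"
)

func main() {
	// New stores per-container state under the given root; Cgroupfs is the
	// default cgroup manager option applied before any caller options.
	factory, err := libcontainer.New("/run/example", libcontainer.Cgroupfs)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Load reattaches to a container previously created under the same root.
	container, err := factory.Load("example-id")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	status, err := container.Status()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("container status:", status)
}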
+func createSystemError(err error, cause string) Error { + gerr := &genericError{ + Timestamp: time.Now(), + Err: err, + ECode: SystemError, + Cause: cause, + Stack: stacktrace.Capture(2), + } + if err != nil { + gerr.Message = err.Error() + } + return gerr +} + +type genericError struct { + Timestamp time.Time + ECode ErrorCode + Err error `json:"-"` + Cause string + Message string + Stack stacktrace.Stacktrace +} + +func (e *genericError) Error() string { + if e.Cause == "" { + return e.Message + } + frame := e.Stack.Frames[0] + return fmt.Sprintf("%s:%d: %s caused %q", frame.File, frame.Line, e.Cause, e.Message) +} + +func (e *genericError) Code() ErrorCode { + return e.ECode +} + +func (e *genericError) Detail(w io.Writer) error { + return errorTemplate.Execute(w, e) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go new file mode 100644 index 000000000..99cc02cbd --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go @@ -0,0 +1,500 @@ +// +build linux + +package libcontainer + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + "strings" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/opencontainers/runc/libcontainer/utils" + "github.com/vishvananda/netlink" +) + +type initType string + +const ( + initSetns initType = "setns" + initStandard initType = "standard" +) + +type pid struct { + Pid int `json:"pid"` +} + +// network is an internal struct used to setup container networks. +type network struct { + configs.Network + + // TempVethPeerName is a unique temporary veth peer name that was placed into + // the container's namespace. 
+ TempVethPeerName string `json:"temp_veth_peer_name"` +} + +// initConfig is used for transferring parameters from Exec() to Init() +type initConfig struct { + Args []string `json:"args"` + Env []string `json:"env"` + Cwd string `json:"cwd"` + Capabilities *configs.Capabilities `json:"capabilities"` + ProcessLabel string `json:"process_label"` + AppArmorProfile string `json:"apparmor_profile"` + NoNewPrivileges bool `json:"no_new_privileges"` + User string `json:"user"` + AdditionalGroups []string `json:"additional_groups"` + Config *configs.Config `json:"config"` + Networks []*network `json:"network"` + PassedFilesCount int `json:"passed_files_count"` + ContainerId string `json:"containerid"` + Rlimits []configs.Rlimit `json:"rlimits"` + CreateConsole bool `json:"create_console"` + Rootless bool `json:"rootless"` +} + +type initer interface { + Init() error +} + +func newContainerInit(t initType, pipe *os.File, consoleSocket *os.File, stateDirFD int) (initer, error) { + var config *initConfig + if err := json.NewDecoder(pipe).Decode(&config); err != nil { + return nil, err + } + if err := populateProcessEnvironment(config.Env); err != nil { + return nil, err + } + switch t { + case initSetns: + return &linuxSetnsInit{ + pipe: pipe, + consoleSocket: consoleSocket, + config: config, + }, nil + case initStandard: + return &linuxStandardInit{ + pipe: pipe, + consoleSocket: consoleSocket, + parentPid: syscall.Getppid(), + config: config, + stateDirFD: stateDirFD, + }, nil + } + return nil, fmt.Errorf("unknown init type %q", t) +} + +// populateProcessEnvironment loads the provided environment variables into the +// current processes's environment. +func populateProcessEnvironment(env []string) error { + for _, pair := range env { + p := strings.SplitN(pair, "=", 2) + if len(p) < 2 { + return fmt.Errorf("invalid environment '%v'", pair) + } + if err := os.Setenv(p[0], p[1]); err != nil { + return err + } + } + return nil +} + +// finalizeNamespace drops the caps, sets the correct user +// and working dir, and closes any leaked file descriptors +// before executing the command inside the namespace +func finalizeNamespace(config *initConfig) error { + // Ensure that all unwanted fds we may have accidentally + // inherited are marked close-on-exec so they stay out of the + // container + if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil { + return err + } + + capabilities := &configs.Capabilities{} + if config.Capabilities != nil { + capabilities = config.Capabilities + } else if config.Config.Capabilities != nil { + capabilities = config.Config.Capabilities + } + w, err := newContainerCapList(capabilities) + if err != nil { + return err + } + // drop capabilities in bounding set before changing user + if err := w.ApplyBoundingSet(); err != nil { + return err + } + // preserve existing capabilities while we change users + if err := system.SetKeepCaps(); err != nil { + return err + } + if err := setupUser(config); err != nil { + return err + } + if err := system.ClearKeepCaps(); err != nil { + return err + } + if err := w.ApplyCaps(); err != nil { + return err + } + if config.Cwd != "" { + if err := syscall.Chdir(config.Cwd); err != nil { + return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err) + } + } + return nil +} + +// setupConsole sets up the console from inside the container, and sends the +// master pty fd to the config.Pipe (using cmsg). 
This is done to ensure that +// consoles are scoped to a container properly (see runc#814 and the many +// issues related to that). This has to be run *after* we've pivoted to the new +// rootfs (and the users' configuration is entirely set up). +func setupConsole(socket *os.File, config *initConfig, mount bool) error { + defer socket.Close() + // At this point, /dev/ptmx points to something that we would expect. We + // used to change the owner of the slave path, but since the /dev/pts mount + // can have gid=X set (at the users' option). So touching the owner of the + // slave PTY is not necessary, as the kernel will handle that for us. Note + // however, that setupUser (specifically fixStdioPermissions) *will* change + // the UID owner of the console to be the user the process will run as (so + // they can actually control their console). + console, err := newConsole() + if err != nil { + return err + } + // After we return from here, we don't need the console anymore. + defer console.Close() + + linuxConsole, ok := console.(*linuxConsole) + if !ok { + return fmt.Errorf("failed to cast console to *linuxConsole") + } + // Mount the console inside our rootfs. + if mount { + if err := linuxConsole.mount(); err != nil { + return err + } + } + // While we can access console.master, using the API is a good idea. + if err := utils.SendFd(socket, linuxConsole.File()); err != nil { + return err + } + // Now, dup over all the things. + return linuxConsole.dupStdio() +} + +// syncParentReady sends to the given pipe a JSON payload which indicates that +// the init is ready to Exec the child process. It then waits for the parent to +// indicate that it is cleared to Exec. +func syncParentReady(pipe io.ReadWriter) error { + // Tell parent. + if err := writeSync(pipe, procReady); err != nil { + return err + } + + // Wait for parent to give the all-clear. + if err := readSync(pipe, procRun); err != nil { + return err + } + + return nil +} + +// syncParentHooks sends to the given pipe a JSON payload which indicates that +// the parent should execute pre-start hooks. It then waits for the parent to +// indicate that it is cleared to resume. +func syncParentHooks(pipe io.ReadWriter) error { + // Tell parent. + if err := writeSync(pipe, procHooks); err != nil { + return err + } + + // Wait for parent to give the all-clear. + if err := readSync(pipe, procResume); err != nil { + return err + } + + return nil +} + +// setupUser changes the groups, gid, and uid for the user inside the container +func setupUser(config *initConfig) error { + // Set up defaults. + defaultExecUser := user.ExecUser{ + Uid: 0, + Gid: 0, + Home: "/", + } + + passwdPath, err := user.GetPasswdPath() + if err != nil { + return err + } + + groupPath, err := user.GetGroupPath() + if err != nil { + return err + } + + execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath) + if err != nil { + return err + } + + var addGroups []int + if len(config.AdditionalGroups) > 0 { + addGroups, err = user.GetAdditionalGroupsPath(config.AdditionalGroups, groupPath) + if err != nil { + return err + } + } + + if config.Rootless { + if execUser.Uid != 0 { + return fmt.Errorf("cannot run as a non-root user in a rootless container") + } + + if execUser.Gid != 0 { + return fmt.Errorf("cannot run as a non-root group in a rootless container") + } + + // We cannot set any additional groups in a rootless container and thus we + // bail if the user asked us to do so. 
TODO: We currently can't do this + // earlier, but if libcontainer.Process.User was typesafe this might work. + if len(addGroups) > 0 { + return fmt.Errorf("cannot set any additional groups in a rootless container") + } + } + + // before we change to the container's user make sure that the processes STDIO + // is correctly owned by the user that we are switching to. + if err := fixStdioPermissions(config, execUser); err != nil { + return err + } + + // This isn't allowed in an unprivileged user namespace since Linux 3.19. + // There's nothing we can do about /etc/group entries, so we silently + // ignore setting groups here (since the user didn't explicitly ask us to + // set the group). + if !config.Rootless { + suppGroups := append(execUser.Sgids, addGroups...) + if err := syscall.Setgroups(suppGroups); err != nil { + return err + } + } + + if err := system.Setgid(execUser.Gid); err != nil { + return err + } + + if err := system.Setuid(execUser.Uid); err != nil { + return err + } + + // if we didn't get HOME already, set it based on the user's HOME + if envHome := os.Getenv("HOME"); envHome == "" { + if err := os.Setenv("HOME", execUser.Home); err != nil { + return err + } + } + return nil +} + +// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user. +// The ownership needs to match because it is created outside of the container and needs to be +// localized. +func fixStdioPermissions(config *initConfig, u *user.ExecUser) error { + var null syscall.Stat_t + if err := syscall.Stat("/dev/null", &null); err != nil { + return err + } + for _, fd := range []uintptr{ + os.Stdin.Fd(), + os.Stderr.Fd(), + os.Stdout.Fd(), + } { + var s syscall.Stat_t + if err := syscall.Fstat(int(fd), &s); err != nil { + return err + } + + // Skip chown of /dev/null if it was used as one of the STDIO fds. + if s.Rdev == null.Rdev { + continue + } + + // Skip chown if s.Gid is actually an unmapped gid in the host. While + // this is a bit dodgy if it just so happens that the console _is_ + // owned by overflow_gid, there's no way for us to disambiguate this as + // a userspace program. + if _, err := config.Config.HostGID(int(s.Gid)); err != nil { + continue + } + + // We only change the uid owner (as it is possible for the mount to + // prefer a different gid, and there's no reason for us to change it). + // The reason why we don't just leave the default uid=X mount setup is + // that users expect to be able to actually use their console. Without + // this code, you couldn't effectively run as a non-root user inside a + // container and also have a console set up. + if err := syscall.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil { + return err + } + } + return nil +} + +// setupNetwork sets up and initializes any network interface inside the container. 
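// A standalone sketch of the privilege-drop ordering setupUser relies on:
// supplementary groups are set first (while the process still has the
// privilege to do so), then the gid, then the uid. The target ids are
// placeholders, error handling is collapsed, and the libcontainer/system
// helpers are used exactly as in the code above.
package main

import (
	"log"
	"syscall"

	"github.com/opencontainers/runc/libcontainer/system"
)

func dropTo(uid, gid int, extraGids []int) error {
	// Supplementary groups can only be changed before privileges are dropped.
	if err := syscall.Setgroups(extraGids); err != nil {
		return err
	}
	// gid before uid: once the uid changes, changing the gid may be refused.
	if err := system.Setgid(gid); err != nil {
		return err
	}
	return system.Setuid(uid)
}

func main() {
	if err := dropTo(1000, 1000, []int{1000}); err != nil {
		log.Fatal(err)
	}
}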
+func setupNetwork(config *initConfig) error { + for _, config := range config.Networks { + strategy, err := getStrategy(config.Type) + if err != nil { + return err + } + if err := strategy.initialize(config); err != nil { + return err + } + } + return nil +} + +func setupRoute(config *configs.Config) error { + for _, config := range config.Routes { + _, dst, err := net.ParseCIDR(config.Destination) + if err != nil { + return err + } + src := net.ParseIP(config.Source) + if src == nil { + return fmt.Errorf("Invalid source for route: %s", config.Source) + } + gw := net.ParseIP(config.Gateway) + if gw == nil { + return fmt.Errorf("Invalid gateway for route: %s", config.Gateway) + } + l, err := netlink.LinkByName(config.InterfaceName) + if err != nil { + return err + } + route := &netlink.Route{ + Scope: netlink.SCOPE_UNIVERSE, + Dst: dst, + Src: src, + Gw: gw, + LinkIndex: l.Attrs().Index, + } + if err := netlink.RouteAdd(route); err != nil { + return err + } + } + return nil +} + +func setupRlimits(limits []configs.Rlimit, pid int) error { + for _, rlimit := range limits { + if err := system.Prlimit(pid, rlimit.Type, syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}); err != nil { + return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err) + } + } + return nil +} + +const _P_PID = 1 + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + // below here is a union; si_pid is the only field we use + si_pid int32 + // Pad to 128 bytes as detailed in blockUntilWaitable + pad [96]byte +} + +// isWaitable returns true if the process has exited false otherwise. +// Its based off blockUntilWaitable in src/os/wait_waitid.go +func isWaitable(pid int) (bool, error) { + si := &siginfo{} + _, _, e := syscall.Syscall6(syscall.SYS_WAITID, _P_PID, uintptr(pid), uintptr(unsafe.Pointer(si)), syscall.WEXITED|syscall.WNOWAIT|syscall.WNOHANG, 0, 0) + if e != 0 { + return false, os.NewSyscallError("waitid", e) + } + + return si.si_pid != 0, nil +} + +// isNoChildren returns true if err represents a syscall.ECHILD false otherwise +func isNoChildren(err error) bool { + switch err := err.(type) { + case syscall.Errno: + if err == syscall.ECHILD { + return true + } + case *os.SyscallError: + if err.Err == syscall.ECHILD { + return true + } + } + return false +} + +// signalAllProcesses freezes then iterates over all the processes inside the +// manager's cgroups sending the signal s to them. +// If s is SIGKILL then it will wait for each process to exit. +// For all other signals it will check if the process is ready to report its +// exit status and only if it is will a wait be performed. 
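// A minimal sketch of the netlink calls setupRoute performs, adding one
// static route outside of any container. The interface name and addresses
// are placeholders, and the program needs CAP_NET_ADMIN to succeed.
package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}
	_, dst, err := net.ParseCIDR("10.0.0.0/24")
	if err != nil {
		log.Fatal(err)
	}
	route := &netlink.Route{
		Scope:     netlink.SCOPE_UNIVERSE,
		Dst:       dst,
		Gw:        net.ParseIP("192.168.1.1"),
		LinkIndex: link.Attrs().Index,
	}
	if err := netlink.RouteAdd(route); err != nil {
		log.Fatal(err)
	}
}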
+func signalAllProcesses(m cgroups.Manager, s os.Signal) error { + var procs []*os.Process + if err := m.Freeze(configs.Frozen); err != nil { + logrus.Warn(err) + } + pids, err := m.GetAllPids() + if err != nil { + m.Freeze(configs.Thawed) + return err + } + for _, pid := range pids { + p, err := os.FindProcess(pid) + if err != nil { + logrus.Warn(err) + continue + } + procs = append(procs, p) + if err := p.Signal(s); err != nil { + logrus.Warn(err) + } + } + if err := m.Freeze(configs.Thawed); err != nil { + logrus.Warn(err) + } + + for _, p := range procs { + if s != syscall.SIGKILL { + if ok, err := isWaitable(p.Pid); err != nil { + if !isNoChildren(err) { + logrus.Warn("signalAllProcesses: ", p.Pid, err) + } + continue + } else if !ok { + // Not ready to report so don't wait + continue + } + } + + if _, err := p.Wait(); err != nil { + if !isNoChildren(err) { + logrus.Warn("wait: ", err) + } + } + } + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go new file mode 100644 index 000000000..bc725a227 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/message_linux.go @@ -0,0 +1,91 @@ +// +build linux + +package libcontainer + +import ( + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// list of known message types we want to send to bootstrap program +// The number is randomly chosen to not conflict with known netlink types +const ( + InitMsg uint16 = 62000 + CloneFlagsAttr uint16 = 27281 + NsPathsAttr uint16 = 27282 + UidmapAttr uint16 = 27283 + GidmapAttr uint16 = 27284 + SetgroupAttr uint16 = 27285 + OomScoreAdjAttr uint16 = 27286 + RootlessAttr uint16 = 27287 + + // When syscall.NLA_HDRLEN is in gccgo, take this out. + syscall_NLA_HDRLEN = (syscall.SizeofNlAttr + syscall.NLA_ALIGNTO - 1) & ^(syscall.NLA_ALIGNTO - 1) +) + +type Int32msg struct { + Type uint16 + Value uint32 +} + +// Serialize serializes the message. 
+// Int32msg has the following representation +// | nlattr len | nlattr type | +// | uint32 value | +func (msg *Int32msg) Serialize() []byte { + buf := make([]byte, msg.Len()) + native := nl.NativeEndian() + native.PutUint16(buf[0:2], uint16(msg.Len())) + native.PutUint16(buf[2:4], msg.Type) + native.PutUint32(buf[4:8], msg.Value) + return buf +} + +func (msg *Int32msg) Len() int { + return syscall_NLA_HDRLEN + 4 +} + +// Bytemsg has the following representation +// | nlattr len | nlattr type | +// | value | pad | +type Bytemsg struct { + Type uint16 + Value []byte +} + +func (msg *Bytemsg) Serialize() []byte { + l := msg.Len() + buf := make([]byte, (l+syscall.NLA_ALIGNTO-1) & ^(syscall.NLA_ALIGNTO-1)) + native := nl.NativeEndian() + native.PutUint16(buf[0:2], uint16(l)) + native.PutUint16(buf[2:4], msg.Type) + copy(buf[4:], msg.Value) + return buf +} + +func (msg *Bytemsg) Len() int { + return syscall_NLA_HDRLEN + len(msg.Value) + 1 // null-terminated +} + +type Boolmsg struct { + Type uint16 + Value bool +} + +func (msg *Boolmsg) Serialize() []byte { + buf := make([]byte, msg.Len()) + native := nl.NativeEndian() + native.PutUint16(buf[0:2], uint16(msg.Len())) + native.PutUint16(buf[2:4], msg.Type) + if msg.Value { + buf[4] = 1 + } else { + buf[4] = 0 + } + return buf +} + +func (msg *Boolmsg) Len() int { + return syscall_NLA_HDRLEN + 1 +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go new file mode 100644 index 000000000..5075bee4d --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/network_linux.go @@ -0,0 +1,259 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "io/ioutil" + "net" + "path/filepath" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/utils" + "github.com/vishvananda/netlink" +) + +var strategies = map[string]networkStrategy{ + "veth": &veth{}, + "loopback": &loopback{}, +} + +// networkStrategy represents a specific network configuration for +// a container's networking stack +type networkStrategy interface { + create(*network, int) error + initialize(*network) error + detach(*configs.Network) error + attach(*configs.Network) error +} + +// getStrategy returns the specific network strategy for the +// provided type. +func getStrategy(tpe string) (networkStrategy, error) { + s, exists := strategies[tpe] + if !exists { + return nil, fmt.Errorf("unknown strategy type %q", tpe) + } + return s, nil +} + +// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. +func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) { + out := &NetworkInterface{Name: interfaceName} + // This can happen if the network runtime information is missing - possible if the + // container was created by an old version of libcontainer. + if interfaceName == "" { + return out, nil + } + type netStatsPair struct { + // Where to write the output. + Out *uint64 + // The network stats file to read. + File string + } + // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
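// A standalone sketch of the attribute layout documented for Int32msg above:
// a 2-byte length, a 2-byte type, then the 4-byte value. Little-endian is
// assumed here for simplicity; the real code uses nl.NativeEndian() to stay
// portable across architectures.
package main

import (
	"encoding/binary"
	"fmt"
)

func serializeInt32Attr(attrType uint16, value uint32) []byte {
	const nlaHdrLen = 4 // syscall.SizeofNlAttr, already 4-byte aligned
	buf := make([]byte, nlaHdrLen+4)
	binary.LittleEndian.PutUint16(buf[0:2], uint16(len(buf))) // nlattr len
	binary.LittleEndian.PutUint16(buf[2:4], attrType)         // nlattr type
	binary.LittleEndian.PutUint32(buf[4:8], value)            // payload
	return buf
}

func main() {
	// 27286 is the OomScoreAdjAttr constant defined above.
	fmt.Printf("% x\n", serializeInt32Attr(27286, 500))
}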
+ netStats := []netStatsPair{ + {Out: &out.RxBytes, File: "tx_bytes"}, + {Out: &out.RxPackets, File: "tx_packets"}, + {Out: &out.RxErrors, File: "tx_errors"}, + {Out: &out.RxDropped, File: "tx_dropped"}, + + {Out: &out.TxBytes, File: "rx_bytes"}, + {Out: &out.TxPackets, File: "rx_packets"}, + {Out: &out.TxErrors, File: "rx_errors"}, + {Out: &out.TxDropped, File: "rx_dropped"}, + } + for _, netStat := range netStats { + data, err := readSysfsNetworkStats(interfaceName, netStat.File) + if err != nil { + return nil, err + } + *(netStat.Out) = data + } + return out, nil +} + +// Reads the specified statistics available under /sys/class/net//statistics +func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { + data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// loopback is a network strategy that provides a basic loopback device +type loopback struct { +} + +func (l *loopback) create(n *network, nspid int) error { + return nil +} + +func (l *loopback) initialize(config *network) error { + return netlink.LinkSetUp(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "lo"}}) +} + +func (l *loopback) attach(n *configs.Network) (err error) { + return nil +} + +func (l *loopback) detach(n *configs.Network) (err error) { + return nil +} + +// veth is a network strategy that uses a bridge and creates +// a veth pair, one that is attached to the bridge on the host and the other +// is placed inside the container's namespace +type veth struct { +} + +func (v *veth) detach(n *configs.Network) (err error) { + return netlink.LinkSetMaster(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: n.HostInterfaceName}}, nil) +} + +// attach a container network interface to an external network +func (v *veth) attach(n *configs.Network) (err error) { + brl, err := netlink.LinkByName(n.Bridge) + if err != nil { + return err + } + br, ok := brl.(*netlink.Bridge) + if !ok { + return fmt.Errorf("Wrong device type %T", brl) + } + host, err := netlink.LinkByName(n.HostInterfaceName) + if err != nil { + return err + } + + if err := netlink.LinkSetMaster(host, br); err != nil { + return err + } + if err := netlink.LinkSetMTU(host, n.Mtu); err != nil { + return err + } + if n.HairpinMode { + if err := netlink.LinkSetHairpin(host, true); err != nil { + return err + } + } + if err := netlink.LinkSetUp(host); err != nil { + return err + } + + return nil +} + +func (v *veth) create(n *network, nspid int) (err error) { + tmpName, err := v.generateTempPeerName() + if err != nil { + return err + } + n.TempVethPeerName = tmpName + if n.Bridge == "" { + return fmt.Errorf("bridge is not specified") + } + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: n.HostInterfaceName, + TxQLen: n.TxQueueLen, + }, + PeerName: n.TempVethPeerName, + } + if err := netlink.LinkAdd(veth); err != nil { + return err + } + defer func() { + if err != nil { + netlink.LinkDel(veth) + } + }() + if err := v.attach(&n.Network); err != nil { + return err + } + child, err := netlink.LinkByName(n.TempVethPeerName) + if err != nil { + return err + } + return netlink.LinkSetNsPid(child, nspid) +} + +func (v *veth) generateTempPeerName() (string, error) { + return utils.GenerateRandomName("veth", 7) +} + +func (v *veth) initialize(config *network) error { + peer := config.TempVethPeerName + if peer == "" { + return fmt.Errorf("peer is not specified") + } + child, err := 
netlink.LinkByName(peer) + if err != nil { + return err + } + if err := netlink.LinkSetDown(child); err != nil { + return err + } + if err := netlink.LinkSetName(child, config.Name); err != nil { + return err + } + // get the interface again after we changed the name as the index also changes. + if child, err = netlink.LinkByName(config.Name); err != nil { + return err + } + if config.MacAddress != "" { + mac, err := net.ParseMAC(config.MacAddress) + if err != nil { + return err + } + if err := netlink.LinkSetHardwareAddr(child, mac); err != nil { + return err + } + } + ip, err := netlink.ParseAddr(config.Address) + if err != nil { + return err + } + if err := netlink.AddrAdd(child, ip); err != nil { + return err + } + if config.IPv6Address != "" { + ip6, err := netlink.ParseAddr(config.IPv6Address) + if err != nil { + return err + } + if err := netlink.AddrAdd(child, ip6); err != nil { + return err + } + } + if err := netlink.LinkSetMTU(child, config.Mtu); err != nil { + return err + } + if err := netlink.LinkSetUp(child); err != nil { + return err + } + if config.Gateway != "" { + gw := net.ParseIP(config.Gateway) + if err := netlink.RouteAdd(&netlink.Route{ + Scope: netlink.SCOPE_UNIVERSE, + LinkIndex: child.Attrs().Index, + Gw: gw, + }); err != nil { + return err + } + } + if config.IPv6Gateway != "" { + gw := net.ParseIP(config.IPv6Gateway) + if err := netlink.RouteAdd(&netlink.Route{ + Scope: netlink.SCOPE_UNIVERSE, + LinkIndex: child.Attrs().Index, + Gw: gw, + }); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go new file mode 100644 index 000000000..839a50c55 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go @@ -0,0 +1,89 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +const oomCgroupName = "memory" + +type PressureLevel uint + +const ( + LowPressure PressureLevel = iota + MediumPressure + CriticalPressure +) + +func registerMemoryEvent(cgDir string, evName string, arg string) (<-chan struct{}, error) { + evFile, err := os.Open(filepath.Join(cgDir, evName)) + if err != nil { + return nil, err + } + fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if syserr != 0 { + evFile.Close() + return nil, syserr + } + + eventfd := os.NewFile(fd, "eventfd") + + eventControlPath := filepath.Join(cgDir, "cgroup.event_control") + data := fmt.Sprintf("%d %d %s", eventfd.Fd(), evFile.Fd(), arg) + if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil { + eventfd.Close() + evFile.Close() + return nil, err + } + ch := make(chan struct{}) + go func() { + defer func() { + close(ch) + eventfd.Close() + evFile.Close() + }() + buf := make([]byte, 8) + for { + if _, err := eventfd.Read(buf); err != nil { + return + } + // When a cgroup is destroyed, an event is sent to eventfd. + // So if the control path is gone, return instead of notifying. + if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { + return + } + ch <- struct{}{} + } + }() + return ch, nil +} + +// notifyOnOOM returns channel on which you can expect event about OOM, +// if process died without OOM this channel will be closed. 
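// A minimal sketch of the veth flow used by the strategy above: create a
// veth pair on the host, then push the peer end into another process's
// network namespace, where it can be renamed, addressed and brought up as
// in (*veth).initialize. The link names and target pid are placeholders,
// and the program needs CAP_NET_ADMIN.
package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth-host0"},
		PeerName:  "veth-peer0",
	}
	if err := netlink.LinkAdd(veth); err != nil {
		log.Fatal(err)
	}
	peer, err := netlink.LinkByName("veth-peer0")
	if err != nil {
		log.Fatal(err)
	}
	// Move the peer into the network namespace of pid 12345 (hypothetical).
	if err := netlink.LinkSetNsPid(peer, 12345); err != nil {
		log.Fatal(err)
	}
	if err := netlink.LinkSetUp(veth); err != nil {
		log.Fatal(err)
	}
}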
+func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) { + dir := paths[oomCgroupName] + if dir == "" { + return nil, fmt.Errorf("path %q missing", oomCgroupName) + } + + return registerMemoryEvent(dir, "memory.oom_control", "") +} + +func notifyMemoryPressure(paths map[string]string, level PressureLevel) (<-chan struct{}, error) { + dir := paths[oomCgroupName] + if dir == "" { + return nil, fmt.Errorf("path %q missing", oomCgroupName) + } + + if level > CriticalPressure { + return nil, fmt.Errorf("invalid pressure level %d", level) + } + + levelStr := []string{"low", "medium", "critical"}[level] + return registerMemoryEvent(dir, "memory.pressure_level", levelStr) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process.go b/vendor/github.com/opencontainers/runc/libcontainer/process.go new file mode 100644 index 000000000..f1ad08149 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/process.go @@ -0,0 +1,106 @@ +package libcontainer + +import ( + "fmt" + "io" + "math" + "os" + + "github.com/opencontainers/runc/libcontainer/configs" +) + +type processOperations interface { + wait() (*os.ProcessState, error) + signal(sig os.Signal) error + pid() int +} + +// Process specifies the configuration and IO for a process inside +// a container. +type Process struct { + // The command to be run followed by any arguments. + Args []string + + // Env specifies the environment variables for the process. + Env []string + + // User will set the uid and gid of the executing process running inside the container + // local to the container's user and group configuration. + User string + + // AdditionalGroups specifies the gids that should be added to supplementary groups + // in addition to those that the user belongs to. + AdditionalGroups []string + + // Cwd will change the processes current working directory inside the container's rootfs. + Cwd string + + // Stdin is a pointer to a reader which provides the standard input stream. + Stdin io.Reader + + // Stdout is a pointer to a writer which receives the standard output stream. + Stdout io.Writer + + // Stderr is a pointer to a writer which receives the standard error stream. + Stderr io.Writer + + // ExtraFiles specifies additional open files to be inherited by the container + ExtraFiles []*os.File + + // Capabilities specify the capabilities to keep when executing the process inside the container + // All capabilities not specified will be dropped from the processes capability mask + Capabilities *configs.Capabilities + + // AppArmorProfile specifies the profile to apply to the process and is + // changed at the time the process is execed + AppArmorProfile string + + // Label specifies the label to apply to the process. It is commonly used by selinux + Label string + + // NoNewPrivileges controls whether processes can gain additional privileges. + NoNewPrivileges *bool + + // Rlimits specifies the resource limits, such as max open files, to set in the container + // If Rlimits are not set, the container will inherit rlimits from the parent process + Rlimits []configs.Rlimit + + // ConsoleSocket provides the masterfd console. + ConsoleSocket *os.File + + ops processOperations +} + +// Wait waits for the process to exit. 
+// Wait releases any resources associated with the Process +func (p Process) Wait() (*os.ProcessState, error) { + if p.ops == nil { + return nil, newGenericError(fmt.Errorf("invalid process"), NoProcessOps) + } + return p.ops.wait() +} + +// Pid returns the process ID +func (p Process) Pid() (int, error) { + // math.MinInt32 is returned here, because it's invalid value + // for the kill() system call. + if p.ops == nil { + return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), NoProcessOps) + } + return p.ops.pid(), nil +} + +// Signal sends a signal to the Process. +func (p Process) Signal(sig os.Signal) error { + if p.ops == nil { + return newGenericError(fmt.Errorf("invalid process"), NoProcessOps) + } + return p.ops.signal(sig) +} + +// IO holds the process's STDIO +type IO struct { + Stdin io.WriteCloser + Stdout io.ReadCloser + Stderr io.ReadCloser +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go new file mode 100644 index 000000000..bfe99551d --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go @@ -0,0 +1,483 @@ +// +build linux + +package libcontainer + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/runc/libcontainer/utils" +) + +type parentProcess interface { + // pid returns the pid for the running process. + pid() int + + // start starts the process execution. + start() error + + // send a SIGKILL to the process and wait for the exit. + terminate() error + + // wait waits on the process returning the process state. + wait() (*os.ProcessState, error) + + // startTime returns the process start time. + startTime() (string, error) + + signal(os.Signal) error + + externalDescriptors() []string + + setExternalDescriptors(fds []string) +} + +type setnsProcess struct { + cmd *exec.Cmd + parentPipe *os.File + childPipe *os.File + cgroupPaths map[string]string + config *initConfig + fds []string + process *Process + bootstrapData io.Reader +} + +func (p *setnsProcess) startTime() (string, error) { + return system.GetProcessStartTime(p.pid()) +} + +func (p *setnsProcess) signal(sig os.Signal) error { + s, ok := sig.(syscall.Signal) + if !ok { + return errors.New("os: unsupported signal type") + } + return syscall.Kill(p.pid(), s) +} + +func (p *setnsProcess) start() (err error) { + defer p.parentPipe.Close() + err = p.cmd.Start() + p.childPipe.Close() + if err != nil { + return newSystemErrorWithCause(err, "starting setns process") + } + if p.bootstrapData != nil { + if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { + return newSystemErrorWithCause(err, "copying bootstrap data to pipe") + } + } + if err = p.execSetns(); err != nil { + return newSystemErrorWithCause(err, "executing setns process") + } + // We can't join cgroups if we're in a rootless container. 
+ if !p.config.Rootless && len(p.cgroupPaths) > 0 { + if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil { + return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid()) + } + } + // set rlimits, this has to be done here because we lose permissions + // to raise the limits once we enter a user-namespace + if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { + return newSystemErrorWithCause(err, "setting rlimits for process") + } + if err := utils.WriteJSON(p.parentPipe, p.config); err != nil { + return newSystemErrorWithCause(err, "writing config to pipe") + } + + ierr := parseSync(p.parentPipe, func(sync *syncT) error { + switch sync.Type { + case procReady: + // This shouldn't happen. + panic("unexpected procReady in setns") + case procHooks: + // This shouldn't happen. + panic("unexpected procHooks in setns") + default: + return newSystemError(fmt.Errorf("invalid JSON payload from child")) + } + }) + + if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil { + return newSystemErrorWithCause(err, "calling shutdown on init pipe") + } + // Must be done after Shutdown so the child will exit and we can wait for it. + if ierr != nil { + p.wait() + return ierr + } + return nil +} + +// execSetns runs the process that executes C code to perform the setns calls +// because setns support requires the C process to fork off a child and perform the setns +// before the go runtime boots, we wait on the process to die and receive the child's pid +// over the provided pipe. +func (p *setnsProcess) execSetns() error { + status, err := p.cmd.Process.Wait() + if err != nil { + p.cmd.Wait() + return newSystemErrorWithCause(err, "waiting on setns process to finish") + } + if !status.Success() { + p.cmd.Wait() + return newSystemError(&exec.ExitError{ProcessState: status}) + } + var pid *pid + if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil { + p.cmd.Wait() + return newSystemErrorWithCause(err, "reading pid from init pipe") + } + process, err := os.FindProcess(pid.Pid) + if err != nil { + return err + } + p.cmd.Process = process + p.process.ops = p + return nil +} + +// terminate sends a SIGKILL to the forked process for the setns routine then waits to +// avoid the process becoming a zombie. 
+func (p *setnsProcess) terminate() error { + if p.cmd.Process == nil { + return nil + } + err := p.cmd.Process.Kill() + if _, werr := p.wait(); err == nil { + err = werr + } + return err +} + +func (p *setnsProcess) wait() (*os.ProcessState, error) { + err := p.cmd.Wait() + + // Return actual ProcessState even on Wait error + return p.cmd.ProcessState, err +} + +func (p *setnsProcess) pid() int { + return p.cmd.Process.Pid +} + +func (p *setnsProcess) externalDescriptors() []string { + return p.fds +} + +func (p *setnsProcess) setExternalDescriptors(newFds []string) { + p.fds = newFds +} + +type initProcess struct { + cmd *exec.Cmd + parentPipe *os.File + childPipe *os.File + config *initConfig + manager cgroups.Manager + container *linuxContainer + fds []string + process *Process + bootstrapData io.Reader + sharePidns bool + rootDir *os.File +} + +func (p *initProcess) pid() int { + return p.cmd.Process.Pid +} + +func (p *initProcess) externalDescriptors() []string { + return p.fds +} + +// execSetns runs the process that executes C code to perform the setns calls +// because setns support requires the C process to fork off a child and perform the setns +// before the go runtime boots, we wait on the process to die and receive the child's pid +// over the provided pipe. +// This is called by initProcess.start function +func (p *initProcess) execSetns() error { + status, err := p.cmd.Process.Wait() + if err != nil { + p.cmd.Wait() + return err + } + if !status.Success() { + p.cmd.Wait() + return &exec.ExitError{ProcessState: status} + } + var pid *pid + if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil { + p.cmd.Wait() + return err + } + process, err := os.FindProcess(pid.Pid) + if err != nil { + return err + } + p.cmd.Process = process + p.process.ops = p + return nil +} + +func (p *initProcess) start() error { + defer p.parentPipe.Close() + err := p.cmd.Start() + p.process.ops = p + p.childPipe.Close() + p.rootDir.Close() + if err != nil { + p.process.ops = nil + return newSystemErrorWithCause(err, "starting init process command") + } + if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil { + return newSystemErrorWithCause(err, "copying bootstrap data to pipe") + } + if err := p.execSetns(); err != nil { + return newSystemErrorWithCause(err, "running exec setns process for init") + } + // Save the standard descriptor names before the container process + // can potentially move them (e.g., via dup2()). If we don't do this now, + // we won't know at checkpoint time which file descriptor to look up. + fds, err := getPipeFds(p.pid()) + if err != nil { + return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid()) + } + p.setExternalDescriptors(fds) + // Do this before syncing with child so that no children can escape the + // cgroup. We don't need to worry about not doing this and not being root + // because we'd be using the rootless cgroup manager in that case. 
+ if err := p.manager.Apply(p.pid()); err != nil { + return newSystemErrorWithCause(err, "applying cgroup configuration for process") + } + defer func() { + if err != nil { + // TODO: should not be the responsibility to call here + p.manager.Destroy() + } + }() + if err := p.createNetworkInterfaces(); err != nil { + return newSystemErrorWithCause(err, "creating network interfaces") + } + if err := p.sendConfig(); err != nil { + return newSystemErrorWithCause(err, "sending config to init process") + } + var ( + sentRun bool + sentResume bool + ) + + ierr := parseSync(p.parentPipe, func(sync *syncT) error { + switch sync.Type { + case procReady: + if err := p.manager.Set(p.config.Config); err != nil { + return newSystemErrorWithCause(err, "setting cgroup config for ready process") + } + // set rlimits, this has to be done here because we lose permissions + // to raise the limits once we enter a user-namespace + if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil { + return newSystemErrorWithCause(err, "setting rlimits for ready process") + } + // call prestart hooks + if !p.config.Config.Namespaces.Contains(configs.NEWNS) { + if p.config.Config.Hooks != nil { + s := configs.HookState{ + Version: p.container.config.Version, + ID: p.container.id, + Pid: p.pid(), + Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"), + } + for i, hook := range p.config.Config.Hooks.Prestart { + if err := hook.Run(s); err != nil { + return newSystemErrorWithCausef(err, "running prestart hook %d", i) + } + } + } + } + // Sync with child. + if err := writeSync(p.parentPipe, procRun); err != nil { + return newSystemErrorWithCause(err, "writing syncT 'run'") + } + sentRun = true + case procHooks: + if p.config.Config.Hooks != nil { + s := configs.HookState{ + Version: p.container.config.Version, + ID: p.container.id, + Pid: p.pid(), + Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"), + } + for i, hook := range p.config.Config.Hooks.Prestart { + if err := hook.Run(s); err != nil { + return newSystemErrorWithCausef(err, "running prestart hook %d", i) + } + } + } + // Sync with child. + if err := writeSync(p.parentPipe, procResume); err != nil { + return newSystemErrorWithCause(err, "writing syncT 'resume'") + } + sentResume = true + default: + return newSystemError(fmt.Errorf("invalid JSON payload from child")) + } + + return nil + }) + + if !sentRun { + return newSystemErrorWithCause(ierr, "container init") + } + if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume { + return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process")) + } + if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil { + return newSystemErrorWithCause(err, "shutting down init pipe") + } + + // Must be done after Shutdown so the child will exit and we can wait for it. 
+ if ierr != nil { + p.wait() + return ierr + } + return nil +} + +func (p *initProcess) wait() (*os.ProcessState, error) { + err := p.cmd.Wait() + if err != nil { + return p.cmd.ProcessState, err + } + // we should kill all processes in cgroup when init is died if we use host PID namespace + if p.sharePidns { + signalAllProcesses(p.manager, syscall.SIGKILL) + } + return p.cmd.ProcessState, nil +} + +func (p *initProcess) terminate() error { + if p.cmd.Process == nil { + return nil + } + err := p.cmd.Process.Kill() + if _, werr := p.wait(); err == nil { + err = werr + } + return err +} + +func (p *initProcess) startTime() (string, error) { + return system.GetProcessStartTime(p.pid()) +} + +func (p *initProcess) sendConfig() error { + // send the config to the container's init process, we don't use JSON Encode + // here because there might be a problem in JSON decoder in some cases, see: + // https://github.com/docker/docker/issues/14203#issuecomment-174177790 + return utils.WriteJSON(p.parentPipe, p.config) +} + +func (p *initProcess) createNetworkInterfaces() error { + for _, config := range p.config.Config.Networks { + strategy, err := getStrategy(config.Type) + if err != nil { + return err + } + n := &network{ + Network: *config, + } + if err := strategy.create(n, p.pid()); err != nil { + return err + } + p.config.Networks = append(p.config.Networks, n) + } + return nil +} + +func (p *initProcess) signal(sig os.Signal) error { + s, ok := sig.(syscall.Signal) + if !ok { + return errors.New("os: unsupported signal type") + } + return syscall.Kill(p.pid(), s) +} + +func (p *initProcess) setExternalDescriptors(newFds []string) { + p.fds = newFds +} + +func getPipeFds(pid int) ([]string, error) { + fds := make([]string, 3) + + dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd") + for i := 0; i < 3; i++ { + // XXX: This breaks if the path is not a valid symlink (which can + // happen in certain particularly unlucky mount namespace setups). + f := filepath.Join(dirPath, strconv.Itoa(i)) + target, err := os.Readlink(f) + if err != nil { + // Ignore permission errors, for rootless containers and other + // non-dumpable processes. if we can't get the fd for a particular + // file, there's not much we can do. + if os.IsPermission(err) { + continue + } + return fds, err + } + fds[i] = target + } + return fds, nil +} + +// InitializeIO creates pipes for use with the process's stdio and returns the +// opposite side for each. Do not use this if you want to have a pseudoterminal +// set up for you by libcontainer (TODO: fix that too). +// TODO: This is mostly unnecessary, and should be handled by clients. 
+func (p *Process) InitializeIO(rootuid, rootgid int) (i *IO, err error) { + var fds []uintptr + i = &IO{} + // cleanup in case of an error + defer func() { + if err != nil { + for _, fd := range fds { + syscall.Close(int(fd)) + } + } + }() + // STDIN + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + fds = append(fds, r.Fd(), w.Fd()) + p.Stdin, i.Stdin = r, w + // STDOUT + if r, w, err = os.Pipe(); err != nil { + return nil, err + } + fds = append(fds, r.Fd(), w.Fd()) + p.Stdout, i.Stdout = w, r + // STDERR + if r, w, err = os.Pipe(); err != nil { + return nil, err + } + fds = append(fds, r.Fd(), w.Fd()) + p.Stderr, i.Stderr = w, r + // change ownership of the pipes incase we are in a user namespace + for _, fd := range fds { + if err := syscall.Fchown(int(fd), rootuid, rootgid); err != nil { + return nil, err + } + } + return i, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go b/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go new file mode 100644 index 000000000..a96f4ca5f --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/restored_process.go @@ -0,0 +1,122 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "os" + + "github.com/opencontainers/runc/libcontainer/system" +) + +func newRestoredProcess(pid int, fds []string) (*restoredProcess, error) { + var ( + err error + ) + proc, err := os.FindProcess(pid) + if err != nil { + return nil, err + } + started, err := system.GetProcessStartTime(pid) + if err != nil { + return nil, err + } + return &restoredProcess{ + proc: proc, + processStartTime: started, + fds: fds, + }, nil +} + +type restoredProcess struct { + proc *os.Process + processStartTime string + fds []string +} + +func (p *restoredProcess) start() error { + return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) +} + +func (p *restoredProcess) pid() int { + return p.proc.Pid +} + +func (p *restoredProcess) terminate() error { + err := p.proc.Kill() + if _, werr := p.wait(); err == nil { + err = werr + } + return err +} + +func (p *restoredProcess) wait() (*os.ProcessState, error) { + // TODO: how do we wait on the actual process? + // maybe use --exec-cmd in criu + st, err := p.proc.Wait() + if err != nil { + return nil, err + } + return st, nil +} + +func (p *restoredProcess) startTime() (string, error) { + return p.processStartTime, nil +} + +func (p *restoredProcess) signal(s os.Signal) error { + return p.proc.Signal(s) +} + +func (p *restoredProcess) externalDescriptors() []string { + return p.fds +} + +func (p *restoredProcess) setExternalDescriptors(newFds []string) { + p.fds = newFds +} + +// nonChildProcess represents a process where the calling process is not +// the parent process. This process is created when a factory loads a container from +// a persisted state. 
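// A minimal sketch of wiring up stdio with InitializeIO, assuming the caller
// runs as root (hence rootuid/rootgid of 0): the container side of each pipe
// is stored on the Process, while the returned IO holds the caller's ends.
// The Args/Env values are placeholders and no container is actually started.
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer"
)

func main() {
	p := &libcontainer.Process{
		Args: []string{"/bin/echo", "hello"},
		Env:  []string{"PATH=/bin:/usr/bin"},
		Cwd:  "/",
	}
	pio, err := p.InitializeIO(0, 0)
	if err != nil {
		fmt.Println(err)
		return
	}
	// p.Stdin/p.Stdout/p.Stderr now point at the container side of each pipe;
	// a real caller would start the container and then read from pio.Stdout
	// and pio.Stderr, and write to pio.Stdin.
	fmt.Printf("caller-side stdout reader: %T\n", pio.Stdout)
}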
+type nonChildProcess struct { + processPid int + processStartTime string + fds []string +} + +func (p *nonChildProcess) start() error { + return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError) +} + +func (p *nonChildProcess) pid() int { + return p.processPid +} + +func (p *nonChildProcess) terminate() error { + return newGenericError(fmt.Errorf("restored process cannot be terminated"), SystemError) +} + +func (p *nonChildProcess) wait() (*os.ProcessState, error) { + return nil, newGenericError(fmt.Errorf("restored process cannot be waited on"), SystemError) +} + +func (p *nonChildProcess) startTime() (string, error) { + return p.processStartTime, nil +} + +func (p *nonChildProcess) signal(s os.Signal) error { + proc, err := os.FindProcess(p.processPid) + if err != nil { + return err + } + return proc.Signal(s) +} + +func (p *nonChildProcess) externalDescriptors() []string { + return p.fds +} + +func (p *nonChildProcess) setExternalDescriptors(newFds []string) { + p.fds = newFds +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go new file mode 100644 index 000000000..d507373fa --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -0,0 +1,812 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/symlink" + "github.com/mrunalp/fileutils" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/system" + libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" + "github.com/opencontainers/selinux/go-selinux/label" +) + +const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV + +// needsSetupDev returns true if /dev needs to be set up. +func needsSetupDev(config *configs.Config) bool { + for _, m := range config.Mounts { + if m.Device == "bind" && libcontainerUtils.CleanPath(m.Destination) == "/dev" { + return false + } + } + return true +} + +// prepareRootfs sets up the devices, mount points, and filesystems for use +// inside a new mount namespace. It doesn't set anything as ro or pivot_root, +// because console setup happens inside the caller. You must call +// finalizeRootfs in order to finish the rootfs setup. 
+func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) { + if err := prepareRoot(config); err != nil { + return newSystemErrorWithCause(err, "preparing rootfs") + } + + setupDev := needsSetupDev(config) + for _, m := range config.Mounts { + for _, precmd := range m.PremountCmds { + if err := mountCmd(precmd); err != nil { + return newSystemErrorWithCause(err, "running premount command") + } + } + + if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil { + return newSystemErrorWithCausef(err, "mounting %q to rootfs %q at %q", m.Source, config.Rootfs, m.Destination) + } + + for _, postcmd := range m.PostmountCmds { + if err := mountCmd(postcmd); err != nil { + return newSystemErrorWithCause(err, "running postmount command") + } + } + } + + if setupDev { + if err := createDevices(config); err != nil { + return newSystemErrorWithCause(err, "creating device nodes") + } + if err := setupPtmx(config); err != nil { + return newSystemErrorWithCause(err, "setting up ptmx") + } + if err := setupDevSymlinks(config.Rootfs); err != nil { + return newSystemErrorWithCause(err, "setting up /dev symlinks") + } + } + + // Signal the parent to run the pre-start hooks. + // The hooks are run after the mounts are setup, but before we switch to the new + // root, so that the old root is still available in the hooks for any mount + // manipulations. + if err := syncParentHooks(pipe); err != nil { + return err + } + + // The reason these operations are done here rather than in finalizeRootfs + // is because the console-handling code gets quite sticky if we have to set + // up the console before doing the pivot_root(2). This is because the + // Console API has to also work with the ExecIn case, which means that the + // API must be able to deal with being inside as well as outside the + // container. It's just cleaner to do this here (at the expense of the + // operation not being perfectly split). + + if err := syscall.Chdir(config.Rootfs); err != nil { + return newSystemErrorWithCausef(err, "changing dir to %q", config.Rootfs) + } + + if config.NoPivotRoot { + err = msMoveRoot(config.Rootfs) + } else { + err = pivotRoot(config.Rootfs) + } + if err != nil { + return newSystemErrorWithCause(err, "jailing process inside rootfs") + } + + if setupDev { + if err := reOpenDevNull(); err != nil { + return newSystemErrorWithCause(err, "reopening /dev/null inside container") + } + } + + return nil +} + +// finalizeRootfs actually switches the root of the process and sets anything +// to ro if necessary. You must call prepareRootfs first. +func finalizeRootfs(config *configs.Config) (err error) { + // remount dev as ro if specified + for _, m := range config.Mounts { + if libcontainerUtils.CleanPath(m.Destination) == "/dev" { + if m.Flags&syscall.MS_RDONLY == syscall.MS_RDONLY { + if err := remountReadonly(m); err != nil { + return newSystemErrorWithCausef(err, "remounting %q as readonly", m.Destination) + } + } + break + } + } + + // set rootfs ( / ) as readonly + if config.Readonlyfs { + if err := setReadonly(); err != nil { + return newSystemErrorWithCause(err, "setting rootfs as readonly") + } + } + + syscall.Umask(0022) + return nil +} + +func mountCmd(cmd configs.Command) error { + command := exec.Command(cmd.Path, cmd.Args[:]...) 
+ command.Env = cmd.Env + command.Dir = cmd.Dir + if out, err := command.CombinedOutput(); err != nil { + return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err) + } + return nil +} + +func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { + var ( + dest = m.Destination + ) + if !strings.HasPrefix(dest, rootfs) { + dest = filepath.Join(rootfs, dest) + } + + switch m.Device { + case "proc", "sysfs": + if err := os.MkdirAll(dest, 0755); err != nil { + return err + } + // Selinux kernels do not support labeling of /proc or /sys + return mountPropagate(m, rootfs, "") + case "mqueue": + if err := os.MkdirAll(dest, 0755); err != nil { + return err + } + if err := mountPropagate(m, rootfs, mountLabel); err != nil { + // older kernels do not support labeling of /dev/mqueue + if err := mountPropagate(m, rootfs, ""); err != nil { + return err + } + return label.SetFileLabel(dest, mountLabel) + } + return nil + case "tmpfs": + copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP + tmpDir := "" + stat, err := os.Stat(dest) + if err != nil { + if err := os.MkdirAll(dest, 0755); err != nil { + return err + } + } + if copyUp { + tmpDir, err = ioutil.TempDir("/tmp", "runctmpdir") + if err != nil { + return newSystemErrorWithCause(err, "tmpcopyup: failed to create tmpdir") + } + defer os.RemoveAll(tmpDir) + m.Destination = tmpDir + } + if err := mountPropagate(m, rootfs, mountLabel); err != nil { + return err + } + if copyUp { + if err := fileutils.CopyDirectory(dest, tmpDir); err != nil { + errMsg := fmt.Errorf("tmpcopyup: failed to copy %s to %s: %v", dest, tmpDir, err) + if err1 := syscall.Unmount(tmpDir, syscall.MNT_DETACH); err1 != nil { + return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg) + } + return errMsg + } + if err := syscall.Mount(tmpDir, dest, "", syscall.MS_MOVE, ""); err != nil { + errMsg := fmt.Errorf("tmpcopyup: failed to move mount %s to %s: %v", tmpDir, dest, err) + if err1 := syscall.Unmount(tmpDir, syscall.MNT_DETACH); err1 != nil { + return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg) + } + return errMsg + } + } + if stat != nil { + if err = os.Chmod(dest, stat.Mode()); err != nil { + return err + } + } + return nil + case "bind": + stat, err := os.Stat(m.Source) + if err != nil { + // error out if the source of a bind mount does not exist as we will be + // unable to bind anything to it. + return err + } + // ensure that the destination of the bind mount is resolved of symlinks at mount time because + // any previous mounts can invalidate the next mount's destination. + // this can happen when a user specifies mounts within other mounts to cause breakouts or other + // evil stuff to try to escape the container's rootfs. + if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { + return err + } + if err := checkMountDestination(rootfs, dest); err != nil { + return err + } + // update the mount with the correct dest after symlinks are resolved. + m.Destination = dest + if err := createIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + if err := mountPropagate(m, rootfs, mountLabel); err != nil { + return err + } + // bind mount won't change mount options, we need remount to make mount options effective. 
+ // first check that we have non-default options required before attempting a remount + if m.Flags&^(syscall.MS_REC|syscall.MS_REMOUNT|syscall.MS_BIND) != 0 { + // only remount if unique mount options are set + if err := remount(m, rootfs); err != nil { + return err + } + } + + if m.Relabel != "" { + if err := label.Validate(m.Relabel); err != nil { + return err + } + shared := label.IsShared(m.Relabel) + if err := label.Relabel(m.Source, mountLabel, shared); err != nil { + return err + } + } + case "cgroup": + binds, err := getCgroupMounts(m) + if err != nil { + return err + } + var merged []string + for _, b := range binds { + ss := filepath.Base(b.Destination) + if strings.Contains(ss, ",") { + merged = append(merged, ss) + } + } + tmpfs := &configs.Mount{ + Source: "tmpfs", + Device: "tmpfs", + Destination: m.Destination, + Flags: defaultMountFlags, + Data: "mode=755", + PropagationFlags: m.PropagationFlags, + } + if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil { + return err + } + for _, b := range binds { + if err := mountToRootfs(b, rootfs, mountLabel); err != nil { + return err + } + } + for _, mc := range merged { + for _, ss := range strings.Split(mc, ",") { + // symlink(2) is very dumb, it will just shove the path into + // the link and doesn't do any checks or relative path + // conversion. Also, don't error out if the cgroup already exists. + if err := os.Symlink(mc, filepath.Join(rootfs, m.Destination, ss)); err != nil && !os.IsExist(err) { + return err + } + } + } + if m.Flags&syscall.MS_RDONLY != 0 { + // remount cgroup root as readonly + mcgrouproot := &configs.Mount{ + Source: m.Destination, + Device: "bind", + Destination: m.Destination, + Flags: defaultMountFlags | syscall.MS_RDONLY | syscall.MS_BIND, + } + if err := remount(mcgrouproot, rootfs); err != nil { + return err + } + } + default: + // ensure that the destination of the mount is resolved of symlinks at mount time because + // any previous mounts can invalidate the next mount's destination. + // this can happen when a user specifies mounts within other mounts to cause breakouts or other + // evil stuff to try to escape the container's rootfs. + var err error + if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil { + return err + } + if err := checkMountDestination(rootfs, dest); err != nil { + return err + } + // update the mount with the correct dest after symlinks are resolved. + m.Destination = dest + if err := os.MkdirAll(dest, 0755); err != nil { + return err + } + return mountPropagate(m, rootfs, mountLabel) + } + return nil +} + +func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) { + mounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, err + } + + cgroupPaths, err := cgroups.ParseCgroupFile("/proc/self/cgroup") + if err != nil { + return nil, err + } + + var binds []*configs.Mount + + for _, mm := range mounts { + dir, err := mm.GetOwnCgroup(cgroupPaths) + if err != nil { + return nil, err + } + relDir, err := filepath.Rel(mm.Root, dir) + if err != nil { + return nil, err + } + binds = append(binds, &configs.Mount{ + Device: "bind", + Source: filepath.Join(mm.Mountpoint, relDir), + Destination: filepath.Join(m.Destination, filepath.Base(mm.Mountpoint)), + Flags: syscall.MS_BIND | syscall.MS_REC | m.Flags, + PropagationFlags: m.PropagationFlags, + }) + } + + return binds, nil +} + +// checkMountDestination checks to ensure that the mount destination is not over the top of /proc. 
+// dest is required to be an abs path and have any symlinks resolved before calling this function. +func checkMountDestination(rootfs, dest string) error { + invalidDestinations := []string{ + "/proc", + } + // White list, it should be sub directories of invalid destinations + validDestinations := []string{ + // These entries can be bind mounted by files emulated by fuse, + // so commands like top, free displays stats in container. + "/proc/cpuinfo", + "/proc/diskstats", + "/proc/meminfo", + "/proc/stat", + "/proc/swaps", + "/proc/uptime", + "/proc/net/dev", + } + for _, valid := range validDestinations { + path, err := filepath.Rel(filepath.Join(rootfs, valid), dest) + if err != nil { + return err + } + if path == "." { + return nil + } + } + for _, invalid := range invalidDestinations { + path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest) + if err != nil { + return err + } + if path == "." || !strings.HasPrefix(path, "..") { + return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid) + } + } + return nil +} + +func setupDevSymlinks(rootfs string) error { + var links = [][2]string{ + {"/proc/self/fd", "/dev/fd"}, + {"/proc/self/fd/0", "/dev/stdin"}, + {"/proc/self/fd/1", "/dev/stdout"}, + {"/proc/self/fd/2", "/dev/stderr"}, + } + // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink + // in /dev if it exists in /proc. + if _, err := os.Stat("/proc/kcore"); err == nil { + links = append(links, [2]string{"/proc/kcore", "/dev/core"}) + } + for _, link := range links { + var ( + src = link[0] + dst = filepath.Join(rootfs, link[1]) + ) + if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { + return fmt.Errorf("symlink %s %s %s", src, dst, err) + } + } + return nil +} + +// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs +// this method will make them point to `/dev/null` in this container's rootfs. This +// needs to be called after we chroot/pivot into the container's rootfs so that any +// symlinks are resolved locally. +func reOpenDevNull() error { + var stat, devNullStat syscall.Stat_t + file, err := os.OpenFile("/dev/null", os.O_RDWR, 0) + if err != nil { + return fmt.Errorf("Failed to open /dev/null - %s", err) + } + defer file.Close() + if err := syscall.Fstat(int(file.Fd()), &devNullStat); err != nil { + return err + } + for fd := 0; fd < 3; fd++ { + if err := syscall.Fstat(fd, &stat); err != nil { + return err + } + if stat.Rdev == devNullStat.Rdev { + // Close and re-open the fd. + if err := syscall.Dup3(int(file.Fd()), fd, 0); err != nil { + return err + } + } + } + return nil +} + +// Create the device nodes in the container. +func createDevices(config *configs.Config) error { + useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER) + oldMask := syscall.Umask(0000) + for _, node := range config.Devices { + // containers running in a user namespace are not allowed to mknod + // devices so we can just bind mount it from the host. + if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil { + syscall.Umask(oldMask) + return err + } + } + syscall.Umask(oldMask) + return nil +} + +func bindMountDeviceNode(dest string, node *configs.Device) error { + f, err := os.Create(dest) + if err != nil && !os.IsExist(err) { + return err + } + if f != nil { + f.Close() + } + return syscall.Mount(node.Path, dest, "bind", syscall.MS_BIND, "") +} + +// Creates the device node in the rootfs of the container. 
+func createDeviceNode(rootfs string, node *configs.Device, bind bool) error { + dest := filepath.Join(rootfs, node.Path) + if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil { + return err + } + + if bind { + return bindMountDeviceNode(dest, node) + } + if err := mknodDevice(dest, node); err != nil { + if os.IsExist(err) { + return nil + } else if os.IsPermission(err) { + return bindMountDeviceNode(dest, node) + } + return err + } + return nil +} + +func mknodDevice(dest string, node *configs.Device) error { + fileMode := node.FileMode + switch node.Type { + case 'c', 'u': + fileMode |= syscall.S_IFCHR + case 'b': + fileMode |= syscall.S_IFBLK + case 'p': + fileMode |= syscall.S_IFIFO + default: + return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) + } + if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil { + return err + } + return syscall.Chown(dest, int(node.Uid), int(node.Gid)) +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the parent mount point of directory passed in as argument. Also return +// optional fields. +func getParentMount(rootfs string) (string, string, error) { + var path string + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, rootfs) + if mountinfo != nil { + return rootfs, mountinfo.Optional, nil + } + + path = rootfs + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find parent mount of %s", rootfs) +} + +// Make parent mount private if it was shared +func rootfsParentMountPrivate(rootfs string) error { + sharedMount := false + + parentMount, optionalOpts, err := getParentMount(rootfs) + if err != nil { + return err + } + + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + // Make parent mount PRIVATE if it was shared. It is needed for two + // reasons. First of all pivot_root() will fail if parent mount is + // shared. Secondly when we bind mount rootfs it will propagate to + // parent namespace and we don't want that to happen. + if sharedMount { + return syscall.Mount("", parentMount, "", syscall.MS_PRIVATE, "") + } + + return nil +} + +func prepareRoot(config *configs.Config) error { + flag := syscall.MS_SLAVE | syscall.MS_REC + if config.RootPropagation != 0 { + flag = config.RootPropagation + } + if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil { + return err + } + + // Make parent mount private to make sure following bind mount does + // not propagate in other namespaces. Also it will help with kernel + // check pass in pivot_root. 
(IS_SHARED(new_mnt->mnt_parent)) + if err := rootfsParentMountPrivate(config.Rootfs); err != nil { + return err + } + + return syscall.Mount(config.Rootfs, config.Rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, "") +} + +func setReadonly() error { + return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") +} + +func setupPtmx(config *configs.Config) error { + ptmx := filepath.Join(config.Rootfs, "dev/ptmx") + if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { + return err + } + if err := os.Symlink("pts/ptmx", ptmx); err != nil { + return fmt.Errorf("symlink dev ptmx %s", err) + } + return nil +} + +// pivotRoot will call pivot_root such that rootfs becomes the new root +// filesystem, and everything else is cleaned up. +func pivotRoot(rootfs string) error { + // While the documentation may claim otherwise, pivot_root(".", ".") is + // actually valid. What this results in is / being the new root but + // /proc/self/cwd being the old root. Since we can play around with the cwd + // with pivot_root this allows us to pivot without creating directories in + // the rootfs. Shout-outs to the LXC developers for giving us this idea. + + oldroot, err := syscall.Open("/", syscall.O_DIRECTORY|syscall.O_RDONLY, 0) + if err != nil { + return err + } + defer syscall.Close(oldroot) + + newroot, err := syscall.Open(rootfs, syscall.O_DIRECTORY|syscall.O_RDONLY, 0) + if err != nil { + return err + } + defer syscall.Close(newroot) + + // Change to the new root so that the pivot_root actually acts on it. + if err := syscall.Fchdir(newroot); err != nil { + return err + } + + if err := syscall.PivotRoot(".", "."); err != nil { + return fmt.Errorf("pivot_root %s", err) + } + + // Currently our "." is oldroot (according to the current kernel code). + // However, purely for safety, we will fchdir(oldroot) since there isn't + // really any guarantee from the kernel what /proc/self/cwd will be after a + // pivot_root(2). + + if err := syscall.Fchdir(oldroot); err != nil { + return err + } + + // Make oldroot rprivate to make sure our unmounts don't propagate to the + // host (and thus bork the machine). + if err := syscall.Mount("", ".", "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return err + } + // Preform the unmount. MNT_DETACH allows us to unmount /proc/self/cwd. + if err := syscall.Unmount(".", syscall.MNT_DETACH); err != nil { + return err + } + + // Switch back to our shiny new root. + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("chdir / %s", err) + } + return nil +} + +func msMoveRoot(rootfs string) error { + if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { + return err + } + if err := syscall.Chroot("."); err != nil { + return err + } + return syscall.Chdir("/") +} + +// createIfNotExists creates a file or a directory only if it does not already exist. +func createIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} + +// readonlyPath will make a path read only. 
+func readonlyPath(path string) error { + if err := syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") +} + +// remountReadonly will remount an existing mount point and ensure that it is read-only. +func remountReadonly(m *configs.Mount) error { + var ( + dest = m.Destination + flags = m.Flags + ) + for i := 0; i < 5; i++ { + if err := syscall.Mount("", dest, "", uintptr(flags|syscall.MS_REMOUNT|syscall.MS_RDONLY), ""); err != nil { + switch err { + case syscall.EBUSY: + time.Sleep(100 * time.Millisecond) + continue + default: + return err + } + } + return nil + } + return fmt.Errorf("unable to mount %s as readonly max retries reached", dest) +} + +// maskPath masks the top of the specified path inside a container to avoid +// security issues from processes reading information from non-namespace aware +// mounts ( proc/kcore ). +// For files, maskPath bind mounts /dev/null over the top of the specified path. +// For directories, maskPath mounts read-only tmpfs over the top of the specified path. +func maskPath(path string) error { + if err := syscall.Mount("/dev/null", path, "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { + if err == syscall.ENOTDIR { + return syscall.Mount("tmpfs", path, "tmpfs", syscall.MS_RDONLY, "") + } + return err + } + return nil +} + +// writeSystemProperty writes the value to a path under /proc/sys as determined from the key. +// For e.g. net.ipv4.ip_forward translated to /proc/sys/net/ipv4/ip_forward. +func writeSystemProperty(key, value string) error { + keyPath := strings.Replace(key, ".", "/", -1) + return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644) +} + +func remount(m *configs.Mount, rootfs string) error { + var ( + dest = m.Destination + ) + if !strings.HasPrefix(dest, rootfs) { + dest = filepath.Join(rootfs, dest) + } + if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags|syscall.MS_REMOUNT), ""); err != nil { + return err + } + return nil +} + +// Do the mount operation followed by additional mounts required to take care +// of propagation flags. 
+func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error { + var ( + dest = m.Destination + data = label.FormatMountLabel(m.Data, mountLabel) + flags = m.Flags + ) + if libcontainerUtils.CleanPath(dest) == "/dev" { + flags &= ^syscall.MS_RDONLY + } + + copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP + if !(copyUp || strings.HasPrefix(dest, rootfs)) { + dest = filepath.Join(rootfs, dest) + } + + if err := syscall.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil { + return err + } + + for _, pflag := range m.PropagationFlags { + if err := syscall.Mount("", dest, "", uintptr(pflag), ""); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go new file mode 100644 index 000000000..c7bdb605a --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go @@ -0,0 +1,11 @@ +// +build linux,go1.5 + +package libcontainer + +import "syscall" + +// Set the GidMappingsEnableSetgroups member to true, so the process's +// setgroups proc entry wont be set to 'deny' if GidMappings are set +func enableSetgroups(sys *syscall.SysProcAttr) { + sys.GidMappingsEnableSetgroups = true +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go new file mode 100644 index 000000000..48cc0ae02 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go @@ -0,0 +1,63 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "os" + + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/keys" + "github.com/opencontainers/runc/libcontainer/seccomp" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" +) + +// linuxSetnsInit performs the container's initialization for running a new process +// inside an existing container. 
+type linuxSetnsInit struct { + pipe *os.File + consoleSocket *os.File + config *initConfig +} + +func (l *linuxSetnsInit) getSessionRingName() string { + return fmt.Sprintf("_ses.%s", l.config.ContainerId) +} + +func (l *linuxSetnsInit) Init() error { + if !l.config.Config.NoNewKeyring { + // do not inherit the parent's session keyring + if _, err := keys.JoinSessionKeyring(l.getSessionRingName()); err != nil { + return err + } + } + if l.config.CreateConsole { + if err := setupConsole(l.consoleSocket, l.config, false); err != nil { + return err + } + if err := system.Setctty(); err != nil { + return err + } + } + if l.config.NoNewPrivileges { + if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil { + return err + } + } + if l.config.Config.Seccomp != nil { + if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { + return err + } + } + if err := finalizeNamespace(l.config); err != nil { + return err + } + if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil { + return err + } + if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { + return err + } + return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ()) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go new file mode 100644 index 000000000..ee6f19a72 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go @@ -0,0 +1,190 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "os" + "os/exec" + "syscall" + + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/keys" + "github.com/opencontainers/runc/libcontainer/seccomp" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" +) + +type linuxStandardInit struct { + pipe *os.File + consoleSocket *os.File + parentPid int + stateDirFD int + config *initConfig +} + +func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) { + var newperms uint32 + + if l.config.Config.Namespaces.Contains(configs.NEWUSER) { + // with user ns we need 'other' search permissions + newperms = 0x8 + } else { + // without user ns we need 'UID' search permissions + newperms = 0x80000 + } + + // create a unique per session container name that we can + // join in setns; however, other containers can also join it + return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms +} + +// PR_SET_NO_NEW_PRIVS isn't exposed in Golang so we define it ourselves copying the value +// the kernel +const PR_SET_NO_NEW_PRIVS = 0x26 + +func (l *linuxStandardInit) Init() error { + if !l.config.Config.NoNewKeyring { + ringname, keepperms, newperms := l.getSessionRingParams() + + // do not inherit the parent's session keyring + sessKeyId, err := keys.JoinSessionKeyring(ringname) + if err != nil { + return err + } + // make session keyring searcheable + if err := keys.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil { + return err + } + } + + if err := setupNetwork(l.config); err != nil { + return err + } + if err := setupRoute(l.config.Config); err != nil { + return err + } + + label.Init() + + // prepareRootfs() can be executed only for a new mount namespace. 
+ if l.config.Config.Namespaces.Contains(configs.NEWNS) { + if err := prepareRootfs(l.pipe, l.config.Config); err != nil { + return err + } + } + + // Set up the console. This has to be done *before* we finalize the rootfs, + // but *after* we've given the user the chance to set up all of the mounts + // they wanted. + if l.config.CreateConsole { + if err := setupConsole(l.consoleSocket, l.config, true); err != nil { + return err + } + if err := system.Setctty(); err != nil { + return err + } + } + + // Finish the rootfs setup. + if l.config.Config.Namespaces.Contains(configs.NEWNS) { + if err := finalizeRootfs(l.config.Config); err != nil { + return err + } + } + + if hostname := l.config.Config.Hostname; hostname != "" { + if err := syscall.Sethostname([]byte(hostname)); err != nil { + return err + } + } + if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil { + return err + } + if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { + return err + } + + for key, value := range l.config.Config.Sysctl { + if err := writeSystemProperty(key, value); err != nil { + return err + } + } + for _, path := range l.config.Config.ReadonlyPaths { + if err := readonlyPath(path); err != nil { + return err + } + } + for _, path := range l.config.Config.MaskPaths { + if err := maskPath(path); err != nil { + return err + } + } + pdeath, err := system.GetParentDeathSignal() + if err != nil { + return err + } + if l.config.NoNewPrivileges { + if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil { + return err + } + } + // Tell our parent that we're ready to Execv. This must be done before the + // Seccomp rules have been applied, because we need to be able to read and + // write to a socket. + if err := syncParentReady(l.pipe); err != nil { + return err + } + // Without NoNewPrivileges seccomp is a privileged operation, so we need to + // do this before dropping capabilities; otherwise do it as late as possible + // just before execve so as few syscalls take place after it as possible. + if l.config.Config.Seccomp != nil && !l.config.NoNewPrivileges { + if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { + return err + } + } + if err := finalizeNamespace(l.config); err != nil { + return err + } + // finalizeNamespace can change user/group which clears the parent death + // signal, so we restore it here. + if err := pdeath.Restore(); err != nil { + return err + } + // compare the parent from the initial start of the init process and make sure that it did not change. + // if the parent changes that means it died and we were reparented to something else so we should + // just kill ourself and not cause problems for someone else. + if syscall.Getppid() != l.parentPid { + return syscall.Kill(syscall.Getpid(), syscall.SIGKILL) + } + // check for the arg before waiting to make sure it exists and it is returned + // as a create time error. + name, err := exec.LookPath(l.config.Args[0]) + if err != nil { + return err + } + // close the pipe to signal that we have completed our init. + l.pipe.Close() + // wait for the fifo to be opened on the other side before + // exec'ing the users process. 
+ fd, err := syscall.Openat(l.stateDirFD, execFifoFilename, os.O_WRONLY|syscall.O_CLOEXEC, 0) + if err != nil { + return newSystemErrorWithCause(err, "openat exec fifo") + } + if _, err := syscall.Write(fd, []byte("0")); err != nil { + return newSystemErrorWithCause(err, "write 0 exec fifo") + } + if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges { + if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { + return newSystemErrorWithCause(err, "init seccomp") + } + } + // close the statedir fd before exec because the kernel resets dumpable in the wrong order + // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 + syscall.Close(l.stateDirFD) + if err := syscall.Exec(name, l.config.Args[0:], os.Environ()); err != nil { + return newSystemErrorWithCause(err, "exec user process") + } + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go new file mode 100644 index 000000000..62878acf0 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/state_linux.go @@ -0,0 +1,247 @@ +// +build linux + +package libcontainer + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/utils" +) + +func newStateTransitionError(from, to containerState) error { + return &stateTransitionError{ + From: from.status().String(), + To: to.status().String(), + } +} + +// stateTransitionError is returned when an invalid state transition happens from one +// state to another. +type stateTransitionError struct { + From string + To string +} + +func (s *stateTransitionError) Error() string { + return fmt.Sprintf("invalid state transition from %s to %s", s.From, s.To) +} + +type containerState interface { + transition(containerState) error + destroy() error + status() Status +} + +func destroy(c *linuxContainer) error { + if !c.config.Namespaces.Contains(configs.NEWPID) { + if err := signalAllProcesses(c.cgroupManager, syscall.SIGKILL); err != nil { + logrus.Warn(err) + } + } + err := c.cgroupManager.Destroy() + if rerr := os.RemoveAll(c.root); err == nil { + err = rerr + } + c.initProcess = nil + if herr := runPoststopHooks(c); err == nil { + err = herr + } + c.state = &stoppedState{c: c} + return err +} + +func runPoststopHooks(c *linuxContainer) error { + if c.config.Hooks != nil { + s := configs.HookState{ + Version: c.config.Version, + ID: c.id, + Bundle: utils.SearchLabels(c.config.Labels, "bundle"), + } + for _, hook := range c.config.Hooks.Poststop { + if err := hook.Run(s); err != nil { + return err + } + } + } + return nil +} + +// stoppedState represents a container is a stopped/destroyed state. +type stoppedState struct { + c *linuxContainer +} + +func (b *stoppedState) status() Status { + return Stopped +} + +func (b *stoppedState) transition(s containerState) error { + switch s.(type) { + case *runningState, *restoredState: + b.c.state = s + return nil + case *stoppedState: + return nil + } + return newStateTransitionError(b, s) +} + +func (b *stoppedState) destroy() error { + return destroy(b.c) +} + +// runningState represents a container that is currently running. 
+type runningState struct { + c *linuxContainer +} + +func (r *runningState) status() Status { + return Running +} + +func (r *runningState) transition(s containerState) error { + switch s.(type) { + case *stoppedState: + t, err := r.c.runType() + if err != nil { + return err + } + if t == Running { + return newGenericError(fmt.Errorf("container still running"), ContainerNotStopped) + } + r.c.state = s + return nil + case *pausedState: + r.c.state = s + return nil + case *runningState: + return nil + } + return newStateTransitionError(r, s) +} + +func (r *runningState) destroy() error { + t, err := r.c.runType() + if err != nil { + return err + } + if t == Running { + return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped) + } + return destroy(r.c) +} + +type createdState struct { + c *linuxContainer +} + +func (i *createdState) status() Status { + return Created +} + +func (i *createdState) transition(s containerState) error { + switch s.(type) { + case *runningState, *pausedState, *stoppedState: + i.c.state = s + return nil + case *createdState: + return nil + } + return newStateTransitionError(i, s) +} + +func (i *createdState) destroy() error { + i.c.initProcess.signal(syscall.SIGKILL) + return destroy(i.c) +} + +// pausedState represents a container that is currently pause. It cannot be destroyed in a +// paused state and must transition back to running first. +type pausedState struct { + c *linuxContainer +} + +func (p *pausedState) status() Status { + return Paused +} + +func (p *pausedState) transition(s containerState) error { + switch s.(type) { + case *runningState, *stoppedState: + p.c.state = s + return nil + case *pausedState: + return nil + } + return newStateTransitionError(p, s) +} + +func (p *pausedState) destroy() error { + t, err := p.c.runType() + if err != nil { + return err + } + if t != Running && t != Created { + if err := p.c.cgroupManager.Freeze(configs.Thawed); err != nil { + return err + } + return destroy(p.c) + } + return newGenericError(fmt.Errorf("container is paused"), ContainerPaused) +} + +// restoredState is the same as the running state but also has associated checkpoint +// information that maybe need destroyed when the container is stopped and destroy is called. +type restoredState struct { + imageDir string + c *linuxContainer +} + +func (r *restoredState) status() Status { + return Running +} + +func (r *restoredState) transition(s containerState) error { + switch s.(type) { + case *stoppedState, *runningState: + return nil + } + return newStateTransitionError(r, s) +} + +func (r *restoredState) destroy() error { + if _, err := os.Stat(filepath.Join(r.c.root, "checkpoint")); err != nil { + if !os.IsNotExist(err) { + return err + } + } + return destroy(r.c) +} + +// loadedState is used whenever a container is restored, loaded, or setting additional +// processes inside and it should not be destroyed when it is exiting. 
+type loadedState struct { + c *linuxContainer + s Status +} + +func (n *loadedState) status() Status { + return n.s +} + +func (n *loadedState) transition(s containerState) error { + n.c.state = s + return nil +} + +func (n *loadedState) destroy() error { + if err := n.c.refreshState(); err != nil { + return err + } + return n.c.state.destroy() +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/stats.go new file mode 100644 index 000000000..303e4b94c --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/stats.go @@ -0,0 +1,15 @@ +package libcontainer + +type NetworkInterface struct { + // Name is the name of the network interface. + Name string + + RxBytes uint64 + RxPackets uint64 + RxErrors uint64 + RxDropped uint64 + TxBytes uint64 + TxPackets uint64 + TxErrors uint64 + TxDropped uint64 +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go new file mode 100644 index 000000000..f8d1d689c --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go @@ -0,0 +1,5 @@ +package libcontainer + +type Stats struct { + Interfaces []*NetworkInterface +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go new file mode 100644 index 000000000..c629dc67d --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go @@ -0,0 +1,8 @@ +package libcontainer + +import "github.com/opencontainers/runc/libcontainer/cgroups" + +type Stats struct { + Interfaces []*NetworkInterface + CgroupStats *cgroups.Stats +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go new file mode 100644 index 000000000..da78c1c2e --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go @@ -0,0 +1,7 @@ +package libcontainer + +// Solaris - TODO + +type Stats struct { + Interfaces []*NetworkInterface +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go new file mode 100644 index 000000000..f8d1d689c --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go @@ -0,0 +1,5 @@ +package libcontainer + +type Stats struct { + Interfaces []*NetworkInterface +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/sync.go b/vendor/github.com/opencontainers/runc/libcontainer/sync.go new file mode 100644 index 000000000..cf7b45bc3 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/sync.go @@ -0,0 +1,107 @@ +package libcontainer + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/opencontainers/runc/libcontainer/utils" +) + +type syncType string + +// Constants that are used for synchronisation between the parent and child +// during container setup. They come in pairs (with procError being a generic +// response which is followed by a &genericError). 
+// +// [ child ] <-> [ parent ] +// +// procHooks --> [run hooks] +// <-- procResume +// +// procConsole --> +// <-- procConsoleReq +// [send(fd)] --> [recv(fd)] +// <-- procConsoleAck +// +// procReady --> [final setup] +// <-- procRun +const ( + procError syncType = "procError" + procReady syncType = "procReady" + procRun syncType = "procRun" + procHooks syncType = "procHooks" + procResume syncType = "procResume" +) + +type syncT struct { + Type syncType `json:"type"` +} + +// writeSync is used to write to a synchronisation pipe. An error is returned +// if there was a problem writing the payload. +func writeSync(pipe io.Writer, sync syncType) error { + if err := utils.WriteJSON(pipe, syncT{sync}); err != nil { + return err + } + return nil +} + +// readSync is used to read from a synchronisation pipe. An error is returned +// if we got a genericError, the pipe was closed, or we got an unexpected flag. +func readSync(pipe io.Reader, expected syncType) error { + var procSync syncT + if err := json.NewDecoder(pipe).Decode(&procSync); err != nil { + if err == io.EOF { + return fmt.Errorf("parent closed synchronisation channel") + } + + if procSync.Type == procError { + var ierr genericError + + if err := json.NewDecoder(pipe).Decode(&ierr); err != nil { + return fmt.Errorf("failed reading error from parent: %v", err) + } + + return &ierr + } + + if procSync.Type != expected { + return fmt.Errorf("invalid synchronisation flag from parent") + } + } + return nil +} + +// parseSync runs the given callback function on each syncT received from the +// child. It will return once io.EOF is returned from the given pipe. +func parseSync(pipe io.Reader, fn func(*syncT) error) error { + dec := json.NewDecoder(pipe) + for { + var sync syncT + if err := dec.Decode(&sync); err != nil { + if err == io.EOF { + break + } + return err + } + + // We handle this case outside fn for cleanliness reasons. + var ierr *genericError + if sync.Type == procError { + if err := dec.Decode(&ierr); err != nil && err != io.EOF { + return newSystemErrorWithCause(err, "decoding proc error from init") + } + if ierr != nil { + return ierr + } + // Programmer error. + panic("No error following JSON procError payload.") + } + + if err := fn(&sync); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vulcand/oxy/stream/stream.go b/vendor/github.com/vulcand/oxy/stream/stream.go deleted file mode 100644 index 52263753b..000000000 --- a/vendor/github.com/vulcand/oxy/stream/stream.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -package stream provides http.Handler middleware that passes-through the entire request - -Stream works around several limitations caused by buffering implementations, but -also introduces certain risks. - -Workarounds for buffering limitations: -1. Streaming really large chunks of data (large file transfers, or streaming videos, -etc.) - -2. Streaming (chunking) sparse data. For example, an implementation might -send a health check or a heart beat over a long-lived connection. This -does not play well with buffering. - -Risks: -1. Connections could survive for very long periods of time. - -2. There is no easy way to enforce limits on size/time of a connection. - -Examples of a streaming middleware: - - // sample HTTP handler - handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("hello")) - }) - - // Stream will literally pass through to the next handler without ANY buffering - // or validation of the data. 
- stream.New(handler) - -*/ -package stream - -import ( - "net/http" - - log "github.com/Sirupsen/logrus" - "github.com/vulcand/oxy/utils" -) - -const ( - // No limit by default - DefaultMaxBodyBytes = -1 -) - -// Stream is responsible for buffering requests and responses -// It buffers large requests and responses to disk, -type Stream struct { - maxRequestBodyBytes int64 - - maxResponseBodyBytes int64 - - retryPredicate hpredicate - - next http.Handler - errHandler utils.ErrorHandler -} - -// New returns a new streamer middleware. New() function supports optional functional arguments -func New(next http.Handler, setters ...optSetter) (*Stream, error) { - strm := &Stream{ - next: next, - - maxRequestBodyBytes: DefaultMaxBodyBytes, - - maxResponseBodyBytes: DefaultMaxBodyBytes, - } - for _, s := range setters { - if err := s(strm); err != nil { - return nil, err - } - } - return strm, nil -} - -type optSetter func(s *Stream) error - -// Wrap sets the next handler to be called by stream handler. -func (s *Stream) Wrap(next http.Handler) error { - s.next = next - return nil -} - -func (s *Stream) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if log.GetLevel() >= log.DebugLevel { - logEntry := log.WithField("Request", utils.DumpHttpRequest(req)) - logEntry.Debug("vulcand/oxy/stream: begin ServeHttp on request") - defer logEntry.Debug("vulcand/oxy/stream: competed ServeHttp on request") - } - - s.next.ServeHTTP(w, req) -} diff --git a/vendor/github.com/vulcand/oxy/stream/threshold.go b/vendor/github.com/vulcand/oxy/stream/threshold.go deleted file mode 100644 index 08d725415..000000000 --- a/vendor/github.com/vulcand/oxy/stream/threshold.go +++ /dev/null @@ -1,225 +0,0 @@ -package stream - -import ( - "fmt" - "net/http" - - "github.com/vulcand/predicate" -) - -func IsValidExpression(expr string) bool { - _, err := parseExpression(expr) - return err == nil -} - -type context struct { - r *http.Request - attempt int - responseCode int -} - -type hpredicate func(*context) bool - -// Parses expression in the go language into Failover predicates -func parseExpression(in string) (hpredicate, error) { - p, err := predicate.NewParser(predicate.Def{ - Operators: predicate.Operators{ - AND: and, - OR: or, - EQ: eq, - NEQ: neq, - LT: lt, - GT: gt, - LE: le, - GE: ge, - }, - Functions: map[string]interface{}{ - "RequestMethod": requestMethod, - "IsNetworkError": isNetworkError, - "Attempts": attempts, - "ResponseCode": responseCode, - }, - }) - if err != nil { - return nil, err - } - out, err := p.Parse(in) - if err != nil { - return nil, err - } - pr, ok := out.(hpredicate) - if !ok { - return nil, fmt.Errorf("expected predicate, got %T", out) - } - return pr, nil -} - -type toString func(c *context) string -type toInt func(c *context) int - -// RequestMethod returns mapper of the request to its method e.g. POST -func requestMethod() toString { - return func(c *context) string { - return c.r.Method - } -} - -// Attempts returns mapper of the request to the number of proxy attempts -func attempts() toInt { - return func(c *context) int { - return c.attempt - } -} - -// ResponseCode returns mapper of the request to the last response code, returns 0 if there was no response code. -func responseCode() toInt { - return func(c *context) int { - return c.responseCode - } -} - -// IsNetworkError returns a predicate that returns true if last attempt ended with network error. 
-func isNetworkError() hpredicate { - return func(c *context) bool { - return c.responseCode == http.StatusBadGateway || c.responseCode == http.StatusGatewayTimeout - } -} - -// and returns predicate by joining the passed predicates with logical 'and' -func and(fns ...hpredicate) hpredicate { - return func(c *context) bool { - for _, fn := range fns { - if !fn(c) { - return false - } - } - return true - } -} - -// or returns predicate by joining the passed predicates with logical 'or' -func or(fns ...hpredicate) hpredicate { - return func(c *context) bool { - for _, fn := range fns { - if fn(c) { - return true - } - } - return false - } -} - -// not creates negation of the passed predicate -func not(p hpredicate) hpredicate { - return func(c *context) bool { - return !p(c) - } -} - -// eq returns predicate that tests for equality of the value of the mapper and the constant -func eq(m interface{}, value interface{}) (hpredicate, error) { - switch mapper := m.(type) { - case toString: - return stringEQ(mapper, value) - case toInt: - return intEQ(mapper, value) - } - return nil, fmt.Errorf("unsupported argument: %T", m) -} - -// neq returns predicate that tests for inequality of the value of the mapper and the constant -func neq(m interface{}, value interface{}) (hpredicate, error) { - p, err := eq(m, value) - if err != nil { - return nil, err - } - return not(p), nil -} - -// lt returns predicate that tests that value of the mapper function is less than the constant -func lt(m interface{}, value interface{}) (hpredicate, error) { - switch mapper := m.(type) { - case toInt: - return intLT(mapper, value) - } - return nil, fmt.Errorf("unsupported argument: %T", m) -} - -// le returns predicate that tests that value of the mapper function is less or equal than the constant -func le(m interface{}, value interface{}) (hpredicate, error) { - l, err := lt(m, value) - if err != nil { - return nil, err - } - e, err := eq(m, value) - if err != nil { - return nil, err - } - return func(c *context) bool { - return l(c) || e(c) - }, nil -} - -// gt returns predicate that tests that value of the mapper function is greater than the constant -func gt(m interface{}, value interface{}) (hpredicate, error) { - switch mapper := m.(type) { - case toInt: - return intGT(mapper, value) - } - return nil, fmt.Errorf("unsupported argument: %T", m) -} - -// ge returns predicate that tests that value of the mapper function is less or equal than the constant -func ge(m interface{}, value interface{}) (hpredicate, error) { - g, err := gt(m, value) - if err != nil { - return nil, err - } - e, err := eq(m, value) - if err != nil { - return nil, err - } - return func(c *context) bool { - return g(c) || e(c) - }, nil -} - -func stringEQ(m toString, val interface{}) (hpredicate, error) { - value, ok := val.(string) - if !ok { - return nil, fmt.Errorf("expected string, got %T", val) - } - return func(c *context) bool { - return m(c) == value - }, nil -} - -func intEQ(m toInt, val interface{}) (hpredicate, error) { - value, ok := val.(int) - if !ok { - return nil, fmt.Errorf("expected int, got %T", val) - } - return func(c *context) bool { - return m(c) == value - }, nil -} - -func intLT(m toInt, val interface{}) (hpredicate, error) { - value, ok := val.(int) - if !ok { - return nil, fmt.Errorf("expected int, got %T", val) - } - return func(c *context) bool { - return m(c) < value - }, nil -} - -func intGT(m toInt, val interface{}) (hpredicate, error) { - value, ok := val.(int) - if !ok { - return nil, fmt.Errorf("expected int, 
got %T", val) - } - return func(c *context) bool { - return m(c) > value - }, nil -} diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go new file mode 100644 index 000000000..d7031b694 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/code.go @@ -0,0 +1,351 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gen + +import ( + "bytes" + "encoding/gob" + "fmt" + "hash" + "hash/fnv" + "io" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// This file contains utilities for generating code. + +// TODO: other write methods like: +// - slices, maps, types, etc. + +// CodeWriter is a utility for writing structured code. It computes the content +// hash and size of written content. It ensures there are newlines between +// written code blocks. +type CodeWriter struct { + buf bytes.Buffer + Size int + Hash hash.Hash32 // content hash + gob *gob.Encoder + // For comments we skip the usual one-line separator if they are followed by + // a code block. + skipSep bool +} + +func (w *CodeWriter) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +// NewCodeWriter returns a new CodeWriter. +func NewCodeWriter() *CodeWriter { + h := fnv.New32() + return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} +} + +// WriteGoFile appends the buffer with the total size of all created structures +// and writes it as a Go file to the the given file with the given package name. +func (w *CodeWriter) WriteGoFile(filename, pkg string) { + f, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer f.Close() + if _, err = w.WriteGo(f, pkg); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo appends the buffer with the total size of all created structures and +// writes it as a Go file to the the given writer with the given package name. +func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { + sz := w.Size + w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) + defer w.buf.Reset() + return WriteGo(out, pkg, w.buf.Bytes()) +} + +func (w *CodeWriter) printf(f string, x ...interface{}) { + fmt.Fprintf(w, f, x...) +} + +func (w *CodeWriter) insertSep() { + if w.skipSep { + w.skipSep = false + return + } + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n") +} + +// WriteComment writes a comment block. All line starts are prefixed with "//". +// Initial empty lines are gobbled. The indentation for the first line is +// stripped from consecutive lines. +func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { + s := fmt.Sprintf(comment, args...) + s = strings.Trim(s, "\n") + + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n// ") + w.skipSep = true + + // strip first indent level. + sep := "\n" + for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { + sep += s[:1] + } + + strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) + + w.printf("\n") +} + +func (w *CodeWriter) writeSizeInfo(size int) { + w.printf("// Size: %d bytes\n", size) +} + +// WriteConst writes a constant of the given name and value. 
+func (w *CodeWriter) WriteConst(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + + switch v.Type().Kind() { + case reflect.String: + w.printf("const %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + w.printf("\n") + default: + w.printf("const %s = %#v\n", name, x) + } +} + +// WriteVar writes a variable of the given name and value. +func (w *CodeWriter) WriteVar(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + oldSize := w.Size + sz := int(v.Type().Size()) + w.Size += sz + + switch v.Type().Kind() { + case reflect.String: + w.printf("var %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + case reflect.Struct: + w.gob.Encode(x) + fallthrough + case reflect.Slice, reflect.Array: + w.printf("var %s = ", name) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + default: + w.printf("var %s %s = ", name, typeName(x)) + w.gob.Encode(x) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + } + w.printf("\n") +} + +func (w *CodeWriter) writeValue(v reflect.Value) { + x := v.Interface() + switch v.Kind() { + case reflect.String: + w.WriteString(v.String()) + case reflect.Array: + // Don't double count: callers of WriteArray count on the size being + // added, so we need to discount it here. + w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + s = strings.Replace(s, `\`, `\\`, -1) + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + + // Print "" +\n, if a string does not start on its own line. 
+ b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p, nLines := 0, 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } + if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) + if explicitParens { + w.printf(`)`) + } +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. + nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. 
+func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 000000000..2acb0355a --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,281 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. +package gen // import "golang.org/x/text/internal/gen" + +import ( + "bytes" + "flag" + "fmt" + "go/build" + "go/format" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "path/filepath" + "sync" + "unicode" + + "golang.org/x/text/unicode/cldr" +) + +var ( + url = flag.String("url", + "http://www.unicode.org/Public", + "URL of Unicode database directory") + iana = flag.String("iana", + "http://www.iana.org", + "URL of the IANA repository") + unicodeVersion = flag.String("unicode", + getEnv("UNICODE_VERSION", unicode.Version), + "unicode version to use") + cldrVersion = flag.String("cldr", + getEnv("CLDR_VERSION", cldr.Version), + "cldr version to use") +) + +func getEnv(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +// Init performs common initialization for a gen command. It parses the flags +// and sets up the standard logging parameters. +func Init() { + log.SetPrefix("") + log.SetFlags(log.Lshortfile) + flag.Parse() +} + +const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package %s + +` + +// UnicodeVersion reports the requested Unicode version. +func UnicodeVersion() string { + return *unicodeVersion +} + +// UnicodeVersion reports the requested CLDR version. +func CLDRVersion() string { + return *cldrVersion +} + +// IsLocal reports whether data files are available locally. +func IsLocal() bool { + dir, err := localReadmeFile() + if err != nil { + return false + } + if _, err = os.Stat(dir); err != nil { + return false + } + return true +} + +// OpenUCDFile opens the requested UCD file. The file is specified relative to +// the public Unicode root directory. It will call log.Fatal if there are any +// errors. +func OpenUCDFile(file string) io.ReadCloser { + return openUnicode(path.Join(*unicodeVersion, "ucd", file)) +} + +// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there +// are any errors. 
+func OpenCLDRCoreZip() io.ReadCloser { + return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") +} + +// OpenUnicodeFile opens the requested file of the requested category from the +// root of the Unicode data archive. The file is specified relative to the +// public Unicode root directory. If version is "", it will use the default +// Unicode version. It will call log.Fatal if there are any errors. +func OpenUnicodeFile(category, version, file string) io.ReadCloser { + if version == "" { + version = UnicodeVersion() + } + return openUnicode(path.Join(category, version, file)) +} + +// OpenIANAFile opens the requested IANA file. The file is specified relative +// to the IANA root, which is typically either http://www.iana.org or the +// iana directory in the local mirror. It will call log.Fatal if there are any +// errors. +func OpenIANAFile(path string) io.ReadCloser { + return Open(*iana, "iana", path) +} + +var ( + dirMutex sync.Mutex + localDir string +) + +const permissions = 0755 + +func localReadmeFile() (string, error) { + p, err := build.Import("golang.org/x/text", "", build.FindOnly) + if err != nil { + return "", fmt.Errorf("Could not locate package: %v", err) + } + return filepath.Join(p.Dir, "DATA", "README"), nil +} + +func getLocalDir() string { + dirMutex.Lock() + defer dirMutex.Unlock() + + readme, err := localReadmeFile() + if err != nil { + log.Fatal(err) + } + dir := filepath.Dir(readme) + if _, err := os.Stat(readme); err != nil { + if err := os.MkdirAll(dir, permissions); err != nil { + log.Fatalf("Could not create directory: %v", err) + } + ioutil.WriteFile(readme, []byte(readmeTxt), permissions) + } + return dir +} + +const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. + +This directory contains downloaded files used to generate the various tables +in the golang.org/x/text subrepo. + +Note that the language subtag repo (iana/assignments/language-subtag-registry) +and all other times in the iana subdirectory are not versioned and will need +to be periodically manually updated. The easiest way to do this is to remove +the entire iana directory. This is mostly of concern when updating the language +package. +` + +// Open opens subdir/path if a local directory is specified and the file exists, +// where subdir is a directory relative to the local root, or fetches it from +// urlRoot/path otherwise. It will call log.Fatal if there are any errors. +func Open(urlRoot, subdir, path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) + return open(file, urlRoot, path) +} + +func openUnicode(path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) + return open(file, *url, path) +} + +// TODO: automatically periodically update non-versioned files. 
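A minimal sketch of how a generation tool might drive the helpers above (the tool itself is hypothetical and not part of this patch; gen.Init, gen.OpenUCDFile, gen.WriteUnicodeVersion and gen.WriteGoFile are the functions defined in this file):

// Hypothetical generator built on golang.org/x/text/internal/gen.
// It fetches UnicodeData.txt (or reads it from the local mirror) and
// writes a formatted Go source file containing a UnicodeVersion constant.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/text/internal/gen"
)

func main() {
	gen.Init() // parse the -url/-unicode/-cldr flags and set up logging

	r := gen.OpenUCDFile("UnicodeData.txt")
	defer r.Close()
	data, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}

	w := &bytes.Buffer{}
	gen.WriteUnicodeVersion(w)
	fmt.Fprintf(w, "// source: %d bytes of UnicodeData.txt\n", len(data))
	gen.WriteGoFile("tables.go", "example", w.Bytes()) // gofmt + standard header
}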
+ +func open(file, urlRoot, path string) io.ReadCloser { + if f, err := os.Open(file); err == nil { + return f + } + r := get(urlRoot, path) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + log.Fatalf("Could not download file: %v", err) + } + os.MkdirAll(filepath.Dir(file), permissions) + if err := ioutil.WriteFile(file, b, permissions); err != nil { + log.Fatalf("Could not create file: %v", err) + } + return ioutil.NopCloser(bytes.NewReader(b)) +} + +func get(root, path string) io.ReadCloser { + url := root + "/" + path + fmt.Printf("Fetching %s...", url) + defer fmt.Println(" done.") + resp, err := http.Get(url) + if err != nil { + log.Fatalf("HTTP GET: %v", err) + } + if resp.StatusCode != 200 { + log.Fatalf("Bad GET status for %q: %q", url, resp.Status) + } + return resp.Body +} + +// TODO: use Write*Version in all applicable packages. + +// WriteUnicodeVersion writes a constant for the Unicode version from which the +// tables are generated. +func WriteUnicodeVersion(w io.Writer) { + fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) +} + +// WriteCLDRVersion writes a constant for the CLDR version from which the +// tables are generated. +func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. +func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { + src := []byte(fmt.Sprintf(header, pkg)) + src = append(src, b...) + formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/vendor/golang.org/x/text/internal/triegen/compact.go b/vendor/golang.org/x/text/internal/triegen/compact.go new file mode 100644 index 000000000..397b975c1 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/compact.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +// This file defines Compacter and its implementations. 
+ +import "io" + +// A Compacter generates an alternative, more space-efficient way to store a +// trie value block. A trie value block holds all possible values for the last +// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block +// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). +type Compacter interface { + // Size returns whether the Compacter could encode the given block as well + // as its size in case it can. len(v) is always 64. + Size(v []uint64) (sz int, ok bool) + + // Store stores the block using the Compacter's compression method. + // It returns a handle with which the block can be retrieved. + // len(v) is always 64. + Store(v []uint64) uint32 + + // Print writes the data structures associated to the given store to w. + Print(w io.Writer) error + + // Handler returns the name of a function that gets called during trie + // lookup for blocks generated by the Compacter. The function should be of + // the form func (n uint32, b byte) uint64, where n is the index returned by + // the Compacter's Store method and b is the last byte of the UTF-8 + // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the + // block. + Handler() string +} + +// simpleCompacter is the default Compacter used by builder. It implements a +// normal trie block. +type simpleCompacter builder + +func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { + return blockSize * b.ValueSize, true +} + +func (b *simpleCompacter) Store(v []uint64) uint32 { + h := uint32(len(b.ValueBlocks) - blockOffset) + b.ValueBlocks = append(b.ValueBlocks, v) + return h +} + +func (b *simpleCompacter) Print(io.Writer) error { + // Structures are printed in print.go. + return nil +} + +func (b *simpleCompacter) Handler() string { + panic("Handler should be special-cased for this Compacter") +} diff --git a/vendor/golang.org/x/text/internal/triegen/print.go b/vendor/golang.org/x/text/internal/triegen/print.go new file mode 100644 index 000000000..8d9f120bc --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/print.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// print writes all the data structures as well as the code necessary to use the +// trie to w. +func (b *builder) print(w io.Writer) error { + b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize + b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize + b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize + b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize + b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize + + // If we only have one root trie, all starter blocks are at position 0 and + // we can access the arrays directly. + if len(b.Trie) == 1 { + // At this point we cannot refer to the generated tables directly. + b.ASCIIBlock = b.Name + "Values" + b.StarterBlock = b.Name + "Index" + } else { + // Otherwise we need to have explicit starter indexes in the trie + // structure. 
+ b.ASCIIBlock = "t.ascii" + b.StarterBlock = "t.utf8Start" + } + + b.SourceType = "[]byte" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + b.SourceType = "string" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + if err := trieGen.Execute(w, b); err != nil { + return err + } + + for _, c := range b.Compactions { + if err := c.c.Print(w); err != nil { + return err + } + } + + return nil +} + +func printValues(n int, values []uint64) string { + w := &bytes.Buffer{} + boff := n * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) + var newline bool + for i, v := range values { + if i%6 == 0 { + newline = true + } + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) + } + } + return w.String() +} + +func printIndex(b *builder, nr int, n *node) string { + w := &bytes.Buffer{} + boff := nr * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) + var newline bool + for i, c := range n.children { + if i%8 == 0 { + newline = true + } + if c != nil { + v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) + } + } + } + return w.String() +} + +var ( + trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ + "printValues": printValues, + "printIndex": printIndex, + "title": strings.Title, + "dec": func(x int) int { return x - 1 }, + "psize": func(n int) string { + return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) + }, + }).Parse(trieTemplate)) + lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) +) + +// TODO: consider the return type of lookup. It could be uint64, even if the +// internal value type is smaller. We will have to verify this with the +// performance of unicode/norm, which is very sensitive to such changes. +const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} +// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. +type {{.Name}}Trie struct { {{if $multi}} + ascii []{{.ValueType}} // index for ASCII bytes + utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 +{{end}}} + +func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} + h := {{.Name}}TrieHandles[i] + return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } +} + +type {{.Name}}TrieHandle struct { + ascii, multi {{.IndexType}} +} + +// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes +var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ +{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} +{{end}}}{{else}} + return &{{.Name}}Trie{} +} +{{end}} +// lookupValue determines the type of block n and looks up the value for b. +func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { + switch { {{range $i, $c := .Compactions}} + {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} + n -= {{$c.Offset}}{{end}} + return {{print $b.ValueType}}({{$c.Handler}}){{end}} + } +} + +// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes +// The third block is the zero block. 
+var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { +{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} +{{end}}} + +// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes +// Block 0 is the zero block. +var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { +{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} +{{end}}} +` + +// TODO: consider allowing zero-length strings after evaluating performance with +// unicode/norm. +const lookupTemplate = ` +// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return {{.ASCIIBlock}}[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = {{.Name}}Index[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return {{.ASCIIBlock}}[c0] + } + i := {{.StarterBlock}}[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} +` diff --git a/vendor/golang.org/x/text/internal/triegen/triegen.go b/vendor/golang.org/x/text/internal/triegen/triegen.go new file mode 100644 index 000000000..adb010812 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/triegen.go @@ -0,0 +1,494 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package triegen implements a code generator for a trie for associating +// unsigned integer values with UTF-8 encoded runes. +// +// Many of the go.text packages use tries for storing per-rune information. A +// trie is especially useful if many of the runes have the same value. If this +// is the case, many blocks can be expected to be shared allowing for +// information on many runes to be stored in little space. +// +// As most of the lookups are done directly on []byte slices, the tries use the +// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to +// runes and contributes a little bit to better performance. It also naturally +// provides a fast path for ASCII. +// +// Space is also an issue. There are many code points defined in Unicode and as +// a result tables can get quite large. So every byte counts. The triegen +// package automatically chooses the smallest integer values to represent the +// tables. Compacters allow further compression of the trie by allowing for +// alternative representations of individual trie blocks. +// +// triegen allows generating multiple tries as a single structure. This is +// useful when, for example, one wants to generate tries for several languages +// that have a lot of values in common. Some existing libraries for +// internationalization store all per-language data as a dynamically loadable +// chunk. The go.text packages are designed with the assumption that the user +// typically wants to compile in support for all supported languages, in line +// with the approach common to Go to create a single standalone binary. The +// multi-root trie approach can give significant storage savings in this +// scenario. +// +// triegen generates both tables and code. The code is optimized to use the +// automatically chosen data types. The following code is generated for a Trie +// or multiple Tries named "foo": +// - type fooTrie +// The trie type. +// +// - func newFooTrie(x int) *fooTrie +// Trie constructor, where x is the index of the trie passed to Gen. +// +// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int) +// The lookup method, where uintX is automatically chosen. +// +// - func lookupString, lookupUnsafe and lookupStringUnsafe +// Variants of the above. +// +// - var fooValues and fooIndex and any tables generated by Compacters. +// The core trie data. +// +// - var fooTrieHandles +// Indexes of starter blocks in case of multiple trie roots. +// +// It is recommended that users test the generated trie by checking the returned +// value for every rune. Such exhaustive tests are possible as the the number of +// runes in Unicode is limited. +package triegen // import "golang.org/x/text/internal/triegen" + +// TODO: Arguably, the internally optimized data types would not have to be +// exposed in the generated API. We could also investigate not generating the +// code, but using it through a package. We would have to investigate the impact +// on performance of making such change, though. For packages like unicode/norm, +// small changes like this could tank performance. + +import ( + "encoding/binary" + "fmt" + "hash/crc64" + "io" + "log" + "unicode/utf8" +) + +// builder builds a set of tries for associating values with runes. The set of +// tries can share common index and value blocks. +type builder struct { + Name string + + // ValueType is the type of the trie values looked up. + ValueType string + + // ValueSize is the byte size of the ValueType. 
+ ValueSize int + + // IndexType is the type of trie index values used for all UTF-8 bytes of + // a rune except the last one. + IndexType string + + // IndexSize is the byte size of the IndexType. + IndexSize int + + // SourceType is used when generating the lookup functions. If the user + // requests StringSupport, all lookup functions will be generated for + // string input as well. + SourceType string + + Trie []*Trie + + IndexBlocks []*node + ValueBlocks [][]uint64 + Compactions []compaction + Checksum uint64 + + ASCIIBlock string + StarterBlock string + + indexBlockIdx map[uint64]int + valueBlockIdx map[uint64]nodeIndex + asciiBlockIdx map[uint64]int + + // Stats are used to fill out the template. + Stats struct { + NValueEntries int + NValueBytes int + NIndexEntries int + NIndexBytes int + NHandleBytes int + } + + err error +} + +// A nodeIndex encodes the index of a node, which is defined by the compaction +// which stores it and an index within the compaction. For internal nodes, the +// compaction is always 0. +type nodeIndex struct { + compaction int + index int +} + +// compaction keeps track of stats used for the compaction. +type compaction struct { + c Compacter + blocks []*node + maxHandle uint32 + totalSize int + + // Used by template-based generator and thus exported. + Cutoff uint32 + Offset uint32 + Handler string +} + +func (b *builder) setError(err error) { + if b.err == nil { + b.err = err + } +} + +// An Option can be passed to Gen. +type Option func(b *builder) error + +// Compact configures the trie generator to use the given Compacter. +func Compact(c Compacter) Option { + return func(b *builder) error { + b.Compactions = append(b.Compactions, compaction{ + c: c, + Handler: c.Handler() + "(n, b)"}) + return nil + } +} + +// Gen writes Go code for a shared trie lookup structure to w for the given +// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will +// return the *nameTrie for tries[x]. A value can be looked up by using one of +// the various lookup methods defined on nameTrie. It returns the table size of +// the generated trie. +func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { + // The index contains two dummy blocks, followed by the zero block. The zero + // block is at offset 0x80, so that the offset for the zero block for + // continuation bytes is 0. + b := &builder{ + Name: name, + Trie: tries, + IndexBlocks: []*node{{}, {}, {}}, + Compactions: []compaction{{ + Handler: name + "Values[n<<6+uint32(b)]", + }}, + // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero + // block. + indexBlockIdx: map[uint64]int{0: 0}, + valueBlockIdx: map[uint64]nodeIndex{0: {}}, + asciiBlockIdx: map[uint64]int{}, + } + b.Compactions[0].c = (*simpleCompacter)(b) + + for _, f := range opts { + if err := f(b); err != nil { + return 0, err + } + } + b.build() + if b.err != nil { + return 0, b.err + } + if err = b.print(w); err != nil { + return 0, err + } + return b.Size(), nil +} + +// A Trie represents a single root node of a trie. A builder may build several +// overlapping tries at once. +type Trie struct { + root *node + + hiddenTrie +} + +// hiddenTrie contains values we want to be visible to the template generator, +// but hidden from the API documentation. +type hiddenTrie struct { + Name string + Checksum uint64 + ASCIIIndex int + StarterIndex int +} + +// NewTrie returns a new trie root. 
+func NewTrie(name string) *Trie { + return &Trie{ + &node{ + children: make([]*node, blockSize), + values: make([]uint64, utf8.RuneSelf), + }, + hiddenTrie{Name: name}, + } +} + +// Gen is a convenience wrapper around the Gen func passing t as the only trie +// and uses the name passed to NewTrie. It returns the size of the generated +// tables. +func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { + return Gen(w, t.Name, []*Trie{t}, opts...) +} + +// node is a node of the intermediate trie structure. +type node struct { + // children holds this node's children. It is always of length 64. + // A child node may be nil. + children []*node + + // values contains the values of this node. If it is non-nil, this node is + // either a root or leaf node: + // For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F]. + // For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF]. + values []uint64 + + index nodeIndex +} + +// Insert associates value with the given rune. Insert will panic if a non-zero +// value is passed for an invalid rune. +func (t *Trie) Insert(r rune, value uint64) { + if value == 0 { + return + } + s := string(r) + if []rune(s)[0] != r && value != 0 { + // Note: The UCD tables will always assign what amounts to a zero value + // to a surrogate. Allowing a zero value for an illegal rune allows + // users to iterate over [0..MaxRune] without having to explicitly + // exclude surrogates, which would be tedious. + panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r)) + } + if len(s) == 1 { + // It is a root node value (ASCII). + t.root.values[s[0]] = value + return + } + + n := t.root + for ; len(s) > 1; s = s[1:] { + if n.children == nil { + n.children = make([]*node, blockSize) + } + p := s[0] % blockSize + c := n.children[p] + if c == nil { + c = &node{} + n.children[p] = c + } + if len(s) > 2 && c.values != nil { + log.Fatalf("triegen: insert(%U): found internal node with values", r) + } + n = c + } + if n.values == nil { + n.values = make([]uint64, blockSize) + } + if n.children != nil { + log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r) + } + n.values[s[0]-0x80] = value +} + +// Size returns the number of bytes the generated trie will take to store. It +// needs to be exported as it is used in the templates. +func (b *builder) Size() int { + // Index blocks. + sz := len(b.IndexBlocks) * blockSize * b.IndexSize + + // Skip the first compaction, which represents the normal value blocks, as + // its totalSize does not account for the ASCII blocks, which are managed + // separately. + sz += len(b.ValueBlocks) * blockSize * b.ValueSize + for _, c := range b.Compactions[1:] { + sz += c.totalSize + } + + // TODO: this computation does not account for the fixed overhead of a using + // a compaction, either code or data. As for data, though, the typical + // overhead of data is in the order of bytes (2 bytes for cases). Further, + // the savings of using a compaction should anyway be substantial for it to + // be worth it. + + // For multi-root tries, we also need to account for the handles. + if len(b.Trie) > 1 { + sz += 2 * b.IndexSize * len(b.Trie) + } + return sz +} + +func (b *builder) build() { + // Compute the sizes of the values. + var vmax uint64 + for _, t := range b.Trie { + vmax = maxValue(t.root, vmax) + } + b.ValueType, b.ValueSize = getIntType(vmax) + + // Compute all block allocations. + // TODO: first compute the ASCII blocks for all tries and then the other + // nodes. 
ASCII blocks are more restricted in placement, as they require two + // blocks to be placed consecutively. Processing them first may improve + // sharing (at least one zero block can be expected to be saved.) + for _, t := range b.Trie { + b.Checksum += b.buildTrie(t) + } + + // Compute the offsets for all the Compacters. + offset := uint32(0) + for i := range b.Compactions { + c := &b.Compactions[i] + c.Offset = offset + offset += c.maxHandle + 1 + c.Cutoff = offset + } + + // Compute the sizes of indexes. + // TODO: different byte positions could have different sizes. So far we have + // not found a case where this is beneficial. + imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) + for _, ib := range b.IndexBlocks { + if x := uint64(ib.index.index); x > imax { + imax = x + } + } + b.IndexType, b.IndexSize = getIntType(imax) +} + +func maxValue(n *node, max uint64) uint64 { + if n == nil { + return max + } + for _, c := range n.children { + max = maxValue(c, max) + } + for _, v := range n.values { + if max < v { + max = v + } + } + return max +} + +func getIntType(v uint64) (string, int) { + switch { + case v < 1<<8: + return "uint8", 1 + case v < 1<<16: + return "uint16", 2 + case v < 1<<32: + return "uint32", 4 + } + return "uint64", 8 +} + +const ( + blockSize = 64 + + // Subtract two blocks to offset 0x80, the first continuation byte. + blockOffset = 2 + + // Subtract three blocks to offset 0xC0, the first non-ASCII starter. + rootBlockOffset = 3 +) + +var crcTable = crc64.MakeTable(crc64.ISO) + +func (b *builder) buildTrie(t *Trie) uint64 { + n := t.root + + // Get the ASCII offset. For the first trie, the ASCII block will be at + // position 0. + hasher := crc64.New(crcTable) + binary.Write(hasher, binary.BigEndian, n.values) + hash := hasher.Sum64() + + v, ok := b.asciiBlockIdx[hash] + if !ok { + v = len(b.ValueBlocks) + b.asciiBlockIdx[hash] = v + + b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) + if v == 0 { + // Add the zero block at position 2 so that it will be assigned a + // zero reference in the lookup blocks. + // TODO: always do this? This would allow us to remove a check from + // the trie lookup, but at the expense of extra space. Analyze + // performance for unicode/norm. + b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) + } + } + t.ASCIIIndex = v + + // Compute remaining offsets. + t.Checksum = b.computeOffsets(n, true) + // We already subtracted the normal blockOffset from the index. Subtract the + // difference for starter bytes. + t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) + return t.Checksum +} + +func (b *builder) computeOffsets(n *node, root bool) uint64 { + // For the first trie, the root lookup block will be at position 3, which is + // the offset for UTF-8 non-ASCII starter bytes. + first := len(b.IndexBlocks) == rootBlockOffset + if first { + b.IndexBlocks = append(b.IndexBlocks, n) + } + + // We special-case the cases where all values recursively are 0. This allows + // for the use of a zero block to which all such values can be directed. 
+ hash := uint64(0) + if n.children != nil || n.values != nil { + hasher := crc64.New(crcTable) + for _, c := range n.children { + var v uint64 + if c != nil { + v = b.computeOffsets(c, false) + } + binary.Write(hasher, binary.BigEndian, v) + } + binary.Write(hasher, binary.BigEndian, n.values) + hash = hasher.Sum64() + } + + if first { + b.indexBlockIdx[hash] = rootBlockOffset - blockOffset + } + + // Compacters don't apply to internal nodes. + if n.children != nil { + v, ok := b.indexBlockIdx[hash] + if !ok { + v = len(b.IndexBlocks) - blockOffset + b.IndexBlocks = append(b.IndexBlocks, n) + b.indexBlockIdx[hash] = v + } + n.index = nodeIndex{0, v} + } else { + h, ok := b.valueBlockIdx[hash] + if !ok { + bestI, bestSize := 0, blockSize*b.ValueSize + for i, c := range b.Compactions[1:] { + if sz, ok := c.c.Size(n.values); ok && bestSize > sz { + bestI, bestSize = i+1, sz + } + } + c := &b.Compactions[bestI] + c.totalSize += bestSize + v := c.c.Store(n.values) + if c.maxHandle < v { + c.maxHandle = v + } + h = nodeIndex{bestI, int(v)} + b.valueBlockIdx[hash] = h + } + n.index = h + } + return hash +} diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go new file mode 100644 index 000000000..309e8d8b1 --- /dev/null +++ b/vendor/golang.org/x/text/internal/ucd/ucd.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ucd provides a parser for Unicode Character Database files, the +// format of which is defined in http://www.unicode.org/reports/tr44/. See +// http://www.unicode.org/Public/UCD/latest/ucd/ for example files. +// +// It currently does not support substitutions of missing fields. +package ucd // import "golang.org/x/text/internal/ucd" + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" + "regexp" + "strconv" + "strings" +) + +// UnicodeData.txt fields. +const ( + CodePoint = iota + Name + GeneralCategory + CanonicalCombiningClass + BidiClass + DecompMapping + DecimalValue + DigitValue + NumericValue + BidiMirrored + Unicode1Name + ISOComment + SimpleUppercaseMapping + SimpleLowercaseMapping + SimpleTitlecaseMapping +) + +// Parse calls f for each entry in the given reader of a UCD file. It will close +// the reader upon return. It will call log.Fatal if any error occurred. +// +// This implements the most common usage pattern of using Parser. +func Parse(r io.ReadCloser, f func(p *Parser)) { + defer r.Close() + + p := New(r) + for p.Next() { + f(p) + } + if err := p.Err(); err != nil { + r.Close() // os.Exit will cause defers not to be called. + log.Fatal(err) + } +} + +// An Option is used to configure a Parser. +type Option func(p *Parser) + +func keepRanges(p *Parser) { + p.keepRanges = true +} + +var ( + // KeepRanges prevents the expansion of ranges. The raw ranges can be + // obtained by calling Range(0) on the parser. + KeepRanges Option = keepRanges +) + +// The Part option register a handler for lines starting with a '@'. The text +// after a '@' is available as the first field. Comments are handled as usual. +func Part(f func(p *Parser)) Option { + return func(p *Parser) { + p.partHandler = f + } +} + +// The CommentHandler option passes comments that are on a line by itself to +// a given handler. +func CommentHandler(f func(s string)) Option { + return func(p *Parser) { + p.commentHandler = f + } +} + +// A Parser parses Unicode Character Database (UCD) files. 
+type Parser struct { + scanner *bufio.Scanner + + keepRanges bool // Don't expand rune ranges in field 0. + + err error + comment []byte + field [][]byte + // parsedRange is needed in case Range(0) is called more than once for one + // field. In some cases this requires scanning ahead. + parsedRange bool + rangeStart, rangeEnd rune + + partHandler func(p *Parser) + commentHandler func(s string) +} + +func (p *Parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *Parser) getField(i int) []byte { + if i >= len(p.field) { + return nil + } + return p.field[i] +} + +// Err returns a non-nil error if any error occurred during parsing. +func (p *Parser) Err() error { + return p.err +} + +// New returns a Parser for the given Reader. +func New(r io.Reader, o ...Option) *Parser { + p := &Parser{ + scanner: bufio.NewScanner(r), + } + for _, f := range o { + f(p) + } + return p +} + +// Next parses the next line in the file. It returns true if a line was parsed +// and false if it reached the end of the file. +func (p *Parser) Next() bool { + if !p.keepRanges && p.rangeStart < p.rangeEnd { + p.rangeStart++ + return true + } + p.comment = nil + p.field = p.field[:0] + p.parsedRange = false + + for p.scanner.Scan() { + b := p.scanner.Bytes() + if len(b) == 0 { + continue + } + if b[0] == '#' { + if p.commentHandler != nil { + p.commentHandler(strings.TrimSpace(string(b[1:]))) + } + continue + } + + // Parse line + if i := bytes.IndexByte(b, '#'); i != -1 { + p.comment = bytes.TrimSpace(b[i+1:]) + b = b[:i] + } + if b[0] == '@' { + if p.partHandler != nil { + p.field = append(p.field, bytes.TrimSpace(b[1:])) + p.partHandler(p) + p.field = p.field[:0] + } + p.comment = nil + continue + } + for { + i := bytes.IndexByte(b, ';') + if i == -1 { + p.field = append(p.field, bytes.TrimSpace(b)) + break + } + p.field = append(p.field, bytes.TrimSpace(b[:i])) + b = b[i+1:] + } + if !p.keepRanges { + p.rangeStart, p.rangeEnd = p.getRange(0) + } + return true + } + p.setError(p.scanner.Err()) + return false +} + +func parseRune(b []byte) (rune, error) { + if len(b) > 2 && b[0] == 'U' && b[1] == '+' { + b = b[2:] + } + x, err := strconv.ParseUint(string(b), 16, 32) + return rune(x), err +} + +func (p *Parser) parseRune(b []byte) rune { + x, err := parseRune(b) + p.setError(err) + return x +} + +// Rune parses and returns field i as a rune. +func (p *Parser) Rune(i int) rune { + if i > 0 || p.keepRanges { + return p.parseRune(p.getField(i)) + } + return p.rangeStart +} + +// Runes interprets and returns field i as a sequence of runes. +func (p *Parser) Runes(i int) (runes []rune) { + add := func(b []byte) { + if b = bytes.TrimSpace(b); len(b) > 0 { + runes = append(runes, p.parseRune(b)) + } + } + for b := p.getField(i); ; { + i := bytes.IndexByte(b, ' ') + if i == -1 { + add(b) + break + } + add(b[:i]) + b = b[i+1:] + } + return +} + +var ( + errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") + + // reRange matches one line of a legacy rune range. + reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") +) + +// Range parses and returns field i as a rune range. A range is inclusive at +// both ends. If the field only has one rune, first and last will be identical. +// It supports the legacy format for ranges used in UnicodeData.txt. 
+func (p *Parser) Range(i int) (first, last rune) { + if !p.keepRanges { + return p.rangeStart, p.rangeStart + } + return p.getRange(i) +} + +func (p *Parser) getRange(i int) (first, last rune) { + b := p.getField(i) + if k := bytes.Index(b, []byte("..")); k != -1 { + return p.parseRune(b[:k]), p.parseRune(b[k+2:]) + } + // The first field may not be a rune, in which case we may ignore any error + // and set the range as 0..0. + x, err := parseRune(b) + if err != nil { + // Disable range parsing henceforth. This ensures that an error will be + // returned if the user subsequently will try to parse this field as + // a Rune. + p.keepRanges = true + } + // Special case for UnicodeData that was retained for backwards compatibility. + if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) { + if p.parsedRange { + return p.rangeStart, p.rangeEnd + } + mf := reRange.FindStringSubmatch(p.scanner.Text()) + if mf == nil || !p.scanner.Scan() { + p.setError(errIncorrectLegacyRange) + return x, x + } + // Using Bytes would be more efficient here, but Text is a lot easier + // and this is not a frequent case. + ml := reRange.FindStringSubmatch(p.scanner.Text()) + if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { + p.setError(errIncorrectLegacyRange) + return x, x + } + p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])]) + p.parsedRange = true + return p.rangeStart, p.rangeEnd + } + return x, x +} + +// bools recognizes all valid UCD boolean values. +var bools = map[string]bool{ + "": false, + "N": false, + "No": false, + "F": false, + "False": false, + "Y": true, + "Yes": true, + "T": true, + "True": true, +} + +// Bool parses and returns field i as a boolean value. +func (p *Parser) Bool(i int) bool { + b := p.getField(i) + for s, v := range bools { + if bstrEq(b, s) { + return v + } + } + p.setError(strconv.ErrSyntax) + return false +} + +// Int parses and returns field i as an integer value. +func (p *Parser) Int(i int) int { + x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) + p.setError(err) + return int(x) +} + +// Uint parses and returns field i as an unsigned integer value. +func (p *Parser) Uint(i int) uint { + x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) + p.setError(err) + return uint(x) +} + +// Float parses and returns field i as a decimal value. +func (p *Parser) Float(i int) float64 { + x, err := strconv.ParseFloat(string(p.getField(i)), 64) + p.setError(err) + return x +} + +// String parses and returns field i as a string value. +func (p *Parser) String(i int) string { + return string(p.getField(i)) +} + +// Strings parses and returns field i as a space-separated list of strings. +func (p *Parser) Strings(i int) []string { + ss := strings.Split(string(p.getField(i)), " ") + for i, s := range ss { + ss[i] = strings.TrimSpace(s) + } + return ss +} + +// Comment returns the comments for the current line. +func (p *Parser) Comment() string { + return string(p.comment) +} + +var errUndefinedEnum = errors.New("ucd: undefined enum value") + +// Enum interprets and returns field i as a value that must be one of the values +// in enum. 
+func (p *Parser) Enum(i int, enum ...string) string { + b := p.getField(i) + for _, s := range enum { + if bstrEq(b, s) { + return s + } + } + p.setError(errUndefinedEnum) + return "" +} + +func bstrEq(b []byte, s string) bool { + if len(b) != len(s) { + return false + } + for i, c := range b { + if c != s[i] { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/text/secure/doc.go b/vendor/golang.org/x/text/secure/doc.go new file mode 100644 index 000000000..e531c3543 --- /dev/null +++ b/vendor/golang.org/x/text/secure/doc.go @@ -0,0 +1,6 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// secure is a repository of text security related packages. +package secure // import "golang.org/x/text/secure" diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go new file mode 100644 index 000000000..2382f4d6d --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/base.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "encoding/xml" + "regexp" + "strconv" +) + +// Elem is implemented by every XML element. +type Elem interface { + setEnclosing(Elem) + setName(string) + enclosing() Elem + + GetCommon() *Common +} + +type hidden struct { + CharData string `xml:",chardata"` + Alias *struct { + Common + Source string `xml:"source,attr"` + Path string `xml:"path,attr"` + } `xml:"alias"` + Def *struct { + Common + Choice string `xml:"choice,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + } `xml:"default"` +} + +// Common holds several of the most common attributes and sub elements +// of an XML element. +type Common struct { + XMLName xml.Name + name string + enclElem Elem + Type string `xml:"type,attr,omitempty"` + Reference string `xml:"reference,attr,omitempty"` + Alt string `xml:"alt,attr,omitempty"` + ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` + Draft string `xml:"draft,attr,omitempty"` + hidden +} + +// Default returns the default type to select from the enclosed list +// or "" if no default value is specified. +func (e *Common) Default() string { + if e.Def == nil { + return "" + } + if e.Def.Choice != "" { + return e.Def.Choice + } else if e.Def.Type != "" { + // Type is still used by the default element in collation. + return e.Def.Type + } + return "" +} + +// GetCommon returns e. It is provided such that Common implements Elem. +func (e *Common) GetCommon() *Common { + return e +} + +// Data returns the character data accumulated for this element. +func (e *Common) Data() string { + e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) + return e.CharData +} + +func (e *Common) setName(s string) { + e.name = s +} + +func (e *Common) enclosing() Elem { + return e.enclElem +} + +func (e *Common) setEnclosing(en Elem) { + e.enclElem = en +} + +// Escape characters that can be escaped without further escaping the string. +var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) + +// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. +// It assumes the input string is correctly formatted. 
+func replaceUnicode(s string) string { + if s[1] == '#' { + r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) + return string(r) + } + r, _, _, _ := strconv.UnquoteChar(s, 0) + return string(r) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go new file mode 100644 index 000000000..2197f8ac2 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go @@ -0,0 +1,130 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run makexml.go -output xml.go + +// Package cldr provides a parser for LDML and related XML formats. +// This package is intended to be used by the table generation tools +// for the various internationalization-related packages. +// As the XML types are generated from the CLDR DTD, and as the CLDR standard +// is periodically amended, this package may change considerably over time. +// This mostly means that data may appear and disappear between versions. +// That is, old code should keep compiling for newer versions, but data +// may have moved or changed. +// CLDR version 22 is the first version supported by this package. +// Older versions may not work. +package cldr // import "golang.org/x/text/unicode/cldr" + +import ( + "fmt" + "sort" +) + +// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. +type CLDR struct { + parent map[string][]string + locale map[string]*LDML + resolved map[string]*LDML + bcp47 *LDMLBCP47 + supp *SupplementalData +} + +func makeCLDR() *CLDR { + return &CLDR{ + parent: make(map[string][]string), + locale: make(map[string]*LDML), + resolved: make(map[string]*LDML), + bcp47: &LDMLBCP47{}, + supp: &SupplementalData{}, + } +} + +// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. +func (cldr *CLDR) BCP47() *LDMLBCP47 { + return nil +} + +// Draft indicates the draft level of an element. +type Draft int + +const ( + Approved Draft = iota + Contributed + Provisional + Unconfirmed +) + +var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} + +// ParseDraft returns the Draft value corresponding to the given string. The +// empty string corresponds to Approved. +func ParseDraft(level string) (Draft, error) { + if level == "" { + return Approved, nil + } + for i, s := range drafts { + if level == s { + return Unconfirmed - Draft(i), nil + } + } + return Approved, fmt.Errorf("cldr: unknown draft level %q", level) +} + +func (d Draft) String() string { + return drafts[len(drafts)-1-int(d)] +} + +// SetDraftLevel sets which draft levels to include in the evaluated LDML. +// Any draft element for which the draft level is higher than lev will be excluded. +// If multiple draft levels are available for a single element, the one with the +// lowest draft level will be selected, unless preferDraft is true, in which case +// the highest draft will be chosen. +// It is assumed that the underlying LDML is canonicalized. +func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { + // TODO: implement + cldr.resolved = make(map[string]*LDML) +} + +// RawLDML returns the LDML XML for id in unresolved form. +// id must be one of the strings returned by Locales. +func (cldr *CLDR) RawLDML(loc string) *LDML { + return cldr.locale[loc] +} + +// LDML returns the fully resolved LDML XML for loc, which must be one of +// the strings returned by Locales. 
+func (cldr *CLDR) LDML(loc string) (*LDML, error) { + return cldr.resolve(loc) +} + +// Supplemental returns the parsed supplemental data. If no such data was parsed, +// nil is returned. +func (cldr *CLDR) Supplemental() *SupplementalData { + return cldr.supp +} + +// Locales returns the locales for which there exist files. +// Valid sublocales for which there is no file are not included. +// The root locale is always sorted first. +func (cldr *CLDR) Locales() []string { + loc := []string{"root"} + hasRoot := false + for l, _ := range cldr.locale { + if l == "root" { + hasRoot = true + continue + } + loc = append(loc, l) + } + sort.Strings(loc[1:]) + if !hasRoot { + return loc[1:] + } + return loc +} + +// Get fills in the fields of x based on the XPath path. +func Get(e Elem, path string) (res Elem, err error) { + return walkXPath(e, path) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go new file mode 100644 index 000000000..80ee28d79 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/collate.go @@ -0,0 +1,359 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// RuleProcessor can be passed to Collator's Process method, which +// parses the rules and calls the respective method for each rule found. +type RuleProcessor interface { + Reset(anchor string, before int) error + Insert(level int, str, context, extend string) error + Index(id string) +} + +const ( + // cldrIndex is a Unicode-reserved sentinel value used to mark the start + // of a grouping within an index. + // We ignore any rule that starts with this rune. + // See http://unicode.org/reports/tr35/#Collation_Elements for details. + cldrIndex = "\uFDD0" + + // specialAnchor is the format in which to represent logical reset positions, + // such as "first tertiary ignorable". + specialAnchor = "<%s/>" +) + +// Process parses the rules for the tailorings of this collation +// and calls the respective methods of p for each rule found. +func (c Collation) Process(p RuleProcessor) (err error) { + if len(c.Cr) > 0 { + if len(c.Cr) > 1 { + return fmt.Errorf("multiple cr elements, want 0 or 1") + } + return processRules(p, c.Cr[0].Data()) + } + if c.Rules.Any != nil { + return c.processXML(p) + } + return errors.New("no tailoring data") +} + +// processRules parses rules in the Collation Rule Syntax defined in +// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. +func processRules(p RuleProcessor, s string) (err error) { + chk := func(s string, e error) string { + if err == nil { + err = e + } + return s + } + i := 0 // Save the line number for use after the loop. + scanner := bufio.NewScanner(strings.NewReader(s)) + for ; scanner.Scan() && err == nil; i++ { + for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { + level := 5 + var ch byte + switch ch, s = s[0], s[1:]; ch { + case '&': // followed by or '[' ']' + if s = skipSpace(s); consume(&s, '[') { + s = chk(parseSpecialAnchor(p, s)) + } else { + s = chk(parseAnchor(p, 0, s)) + } + case '<': // sort relation '<'{1,4}, optionally followed by '*'. 
+ for level = 1; consume(&s, '<'); level++ { + } + if level > 4 { + err = fmt.Errorf("level %d > 4", level) + } + fallthrough + case '=': // identity relation, optionally followed by *. + if consume(&s, '*') { + s = chk(parseSequence(p, level, s)) + } else { + s = chk(parseOrder(p, level, s)) + } + default: + chk("", fmt.Errorf("illegal operator %q", ch)) + break + } + } + } + if chk("", scanner.Err()); err != nil { + return fmt.Errorf("%d: %v", i, err) + } + return nil +} + +// parseSpecialAnchor parses the anchor syntax which is either of the form +// ['before' ] +// or +// [