chore: update docker and k8s

parent 2b5c7f9e91
commit c2d440a914

1283 changed files with 67741 additions and 27918 deletions

Gopkg.lock (generated): 243 lines changed
|
@ -101,6 +101,14 @@
|
|||
revision = "f533f7a102197536779ea3a8cb881d639e21ec5a"
|
||||
version = "v0.4.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6b6d999be55cc0e0704ed0ac4532175f1f71639fbda6d143dc86ac5a5d3bc87f"
|
||||
name = "github.com/Microsoft/hcsshim"
|
||||
packages = ["osversion"]
|
||||
pruneopts = "NUT"
|
||||
revision = "f92b8fb9c92e17da496af5a69e3ee13fbe9916e1"
|
||||
version = "v0.8.6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:d484472e9af62758e3963ee4d8112d5da58cd61833d71e3c1d25165a96d4518a"
|
||||
|
@ -109,14 +117,6 @@
|
|||
pruneopts = "NUT"
|
||||
revision = "289a3b81f5aedc99f8d6eb0f67827c142f1310d8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1b532462adeb680f8526152d0bc1baa203ce6b63b13070b9f9bd5812ef74dfce"
|
||||
name = "github.com/Nvveen/Gotty"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "a8b993ba6abdb0e0c12b0125c603323a71c7790c"
|
||||
source = "github.com/ijc25/Gotty"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a4068a93355ba3cff0a719425713123d23c90010cb4d023b40c679a22465736d"
|
||||
|
@ -278,11 +278,16 @@
|
|||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fc8dbcc2a5de7c093e167828ebbdf551641761d2ad75431d3a167d467a264115"
|
||||
digest = "1:835fc727705e8cf858ea44ad9bd872ff826d1c6b019598e0f9a64c9b942a947b"
|
||||
name = "github.com/containerd/continuity"
|
||||
packages = ["pathdriver"]
|
||||
packages = [
|
||||
"fs",
|
||||
"pathdriver",
|
||||
"syscallx",
|
||||
"sysx",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "b2b946a77f5973f420514090d6f6dd58b08303f0"
|
||||
revision = "aaeac12a7ffcd198ae25440a9dff125c2e2703a7"
|
||||
|
||||
[[projects]]
|
||||
branch = "containous-fork"
|
||||
|
@ -357,25 +362,27 @@
|
|||
version = "v0.21.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:cf7cba074c4d2f8e2a5cc2f10b1f6762c86cff2e39917b9f9a6dbd7df57fe9c9"
|
||||
digest = "1:067906552492b3c4a94057ba33279ad1af906a701be3b7d6a92a9478c25fc24f"
|
||||
name = "github.com/docker/cli"
|
||||
packages = [
|
||||
"cli/command/image/build",
|
||||
"cli/config",
|
||||
"cli/config/configfile",
|
||||
"cli/config/credentials",
|
||||
"cli/config/types",
|
||||
"opts",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "6b63d7b96a41055baddc3fa71f381c7f60bd5d8e"
|
||||
revision = "5b38d82aa0767e32f53b5821a9a5044f20eeef29"
|
||||
version = "v19.03.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:85d8985e29b9a55ed2ede89e90c4418cf7e7b0e539cf0e00088f2bd9754c028c"
|
||||
digest = "1:26a1d70717dac8c37ae869d86103b9d72b71a6faf383a5e9b5b7e602c9b9743b"
|
||||
name = "github.com/docker/distribution"
|
||||
packages = [
|
||||
".",
|
||||
"context",
|
||||
"digestset",
|
||||
"metrics",
|
||||
"reference",
|
||||
"registry/api/errcode",
|
||||
"registry/api/v2",
|
||||
|
@ -385,13 +392,13 @@
|
|||
"registry/client/transport",
|
||||
"registry/storage/cache",
|
||||
"registry/storage/cache/memory",
|
||||
"uuid",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
|
||||
revision = "2461543d988979529609e8cb6fca9ca190dc48da"
|
||||
version = "v2.7.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2e4c2b6359d0344ddafb04133bea7a6ff1b8d4db1ab6ee02a43dab1f6b25b337"
|
||||
digest = "1:c2ec28361058cb8c28c4a9b9a505fb403e227b1476f24a3006de5135643dd0bb"
|
||||
name = "github.com/docker/docker"
|
||||
packages = [
|
||||
"api",
|
||||
|
@ -413,6 +420,7 @@
|
|||
"builder/dockerignore",
|
||||
"builder/remotecontext/git",
|
||||
"client",
|
||||
"errdefs",
|
||||
"pkg/archive",
|
||||
"pkg/fileutils",
|
||||
"pkg/homedir",
|
||||
|
@ -437,15 +445,16 @@
|
|||
"registry/resumable",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "7848b8beb9d38a98a78b75f78e05f8d2255f9dfe"
|
||||
revision = "fa8dd90ceb7bcb9d554d27e0b9087ab83e54bd2b"
|
||||
source = "github.com/docker/engine"
|
||||
version = "v19.03.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:82d43dbea74bc51ed1a8beab6dc3ea09185da574e7ab6a96e3804e9b9d55fd2a"
|
||||
digest = "1:8866486038791fe65ea1abf660041423954b1f3fb99ea6a0ad8424422e943458"
|
||||
name = "github.com/docker/docker-credential-helpers"
|
||||
packages = [
|
||||
"client",
|
||||
"credentials",
|
||||
"pass",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1"
|
||||
|
@ -463,6 +472,14 @@
|
|||
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c49ecccb8abda39a5a48a99267d6abfdf37f59f1c7fdbcbe04b3b9afc3ab625d"
|
||||
name = "github.com/docker/go-metrics"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "b84716841b82eab644a0c64fc8b42d480e49add5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9d8fe33c2e1aa01c76db67f285ffdb7dbdb5c5d90d9deb296526d67b9798ce7b"
|
||||
name = "github.com/docker/go-units"
|
||||
|
@ -472,7 +489,7 @@
|
|||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9b26bdc6b9952f728f61f510a48875c38974591c69b0afa77dcfe466c6162e9e"
|
||||
digest = "1:374db732c8336336840f94794cb57aa9bb55d5840e76dae8142a7d15272cc126"
|
||||
name = "github.com/docker/libcompose"
|
||||
packages = [
|
||||
"config",
|
||||
|
@ -497,7 +514,7 @@
|
|||
"yaml",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "57bd716502dcbe1799f026148016022b0f3b989c"
|
||||
revision = "eac9fe1b8b032d757f9e281a19a62195622a5a11"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5f4c34ef3ef56be4a7f57cc43d8d219930cbcc82fdff174518703200ad165c0e"
|
||||
|
@ -545,6 +562,14 @@
|
|||
pruneopts = "NUT"
|
||||
revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4304cca260ab815326ca42d9c28fb843342748267034c51963e13f5e54e727d1"
|
||||
name = "github.com/evanphx/json-patch"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f"
|
||||
version = "v4.5.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6f26e34204ddad638d25456c6443c2763aa38954558226722c253e08e9f491fa"
|
||||
name = "github.com/exoscale/egoscale"
|
||||
|
@ -576,13 +601,6 @@
|
|||
pruneopts = "NUT"
|
||||
revision = "99a156b96fb2f9dbe6465affa51bbdccdd37174d"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:abfe129dc92b16fbf0cc9d6336096a2823151756f62072a700eb10754141b38e"
|
||||
name = "github.com/ghodss/yaml"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d82b2dc81c551e7c15f31523a2cc8ee9121b39cfbf63174d98a0bc8edf2d3c5e"
|
||||
name = "github.com/go-acme/lego"
|
||||
|
@ -746,13 +764,6 @@
|
|||
revision = "909568be09de550ed094403c2bf8a261b5bb730a"
|
||||
version = "v0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:78b8040ece2ff622580def2708b9eb0b2857711b6744c475439bf337e9c677ea"
|
||||
name = "github.com/golang/glog"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "44145f04b68cf362d9c4df2182967c2275eaefed"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4"
|
||||
name = "github.com/golang/protobuf"
|
||||
|
@ -776,12 +787,18 @@
|
|||
revision = "553a641470496b2327abcac10b36396bd98e45c9"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:245bd4eb633039cd66106a5d340ae826d87f4e36a8602fcc940e14176fd26ea7"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
digest = "1:bf40199583e5143d1472fc34d10d6f4b69d97572142acf343b3e43136da40823"
|
||||
name = "github.com/google/go-cmp"
|
||||
packages = [
|
||||
"cmp",
|
||||
"cmp/internal/diff",
|
||||
"cmp/internal/flags",
|
||||
"cmp/internal/function",
|
||||
"cmp/internal/value",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
|
||||
revision = "6f77996f0c42f7b84e5a2b252227263f93432e9b"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7e49319ff995ba7b1c89845b02908d609db90879192e0f24d6338fd715043e50"
|
||||
|
@ -873,17 +890,6 @@
|
|||
revision = "0bd13642feb8f57acc0d8e3a568edc34e05a74b9"
|
||||
version = "1.1.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [
|
||||
".",
|
||||
"diskcache",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f7b3db9cb74d13f6a7cf84b3801e68585745eacaf7d40cc10ecc4734c30503d3"
|
||||
name = "github.com/hashicorp/go-version"
|
||||
|
@ -1167,6 +1173,14 @@
|
|||
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:946bf1e858c4884cc82907463b0579dfd3b84e1b541bd49808d2bcfcb9452be0"
|
||||
name = "github.com/morikuni/aec"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "39771216ff4c63d11f5e604076f9c45e8be1067b"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7bb97a5f80a2429fa605e176e16cf682cbb5ac681f421a06efb4e3b8efae6e5f"
|
||||
name = "github.com/mvdan/xurls"
|
||||
|
@ -1298,22 +1312,6 @@
|
|||
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
|
||||
version = "v2.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
pruneopts = "NUT"
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:44c66ad69563dbe3f8e76d7d6cad21a03626e53f1875b5ab163ded419e01ca7a"
|
||||
name = "github.com/philhofer/fwd"
|
||||
|
@ -1610,11 +1608,12 @@
|
|||
revision = "e02fc20de94c78484cd5ffb007f8af96be030a45"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:60e24d485a33cb9bffc041de6a5f1596b8ef8d9a9bb9e9f3834c72fd5a96e76a"
|
||||
digest = "1:5185937abbca299da07bcbeb4e38dfa186c09fbf60042c3d07e0073a0634a78f"
|
||||
name = "github.com/xeipuuv/gojsonschema"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70"
|
||||
revision = "f971f3cd73b2899de6923801c147f075263e0c50"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:aafe0319af5410fb19a23a575ea6ee4b14253e122ef87f936bac65ea1e6b280c"
|
||||
|
@ -1713,6 +1712,14 @@
|
|||
pruneopts = "NUT"
|
||||
revision = "ec22f46f877b4505e0117eeaab541714644fdd28"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b521f10a2d8fa85c04a8ef4e62f2d1e14d303599a55d64dabf9f5a02f84d35eb"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
pruneopts = "NUT"
|
||||
revision = "112230192c580c3556b8cee6403af37a4fc5f28c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:51bfac9fe01b6a949bfed6db70b00bada281f0d64e5296ec644163aa977bfee0"
|
||||
name = "golang.org/x/sys"
|
||||
|
@ -1922,39 +1929,47 @@
|
|||
version = "v2.1.4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:302ad18387350c3d9792da66de666f76d2ca8c62c47dd6b9434269c7cfa18971"
|
||||
digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "53feefa2559fb8dfa8d81baad31be332c97d6c77"
|
||||
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
|
||||
version = "v2.2.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b1c6723e934087c2fa159e1c6a309c3c5c0b9a7d209c2ba6028f21240ebe7606"
|
||||
digest = "1:759855d98a898d7d6a091b89b9c85c03d6efd673c44e15561d716543f4e88f31"
|
||||
name = "k8s.io/api"
|
||||
packages = [
|
||||
"admissionregistration/v1alpha1",
|
||||
"admissionregistration/v1beta1",
|
||||
"apps/v1",
|
||||
"apps/v1beta1",
|
||||
"apps/v1beta2",
|
||||
"auditregistration/v1alpha1",
|
||||
"authentication/v1",
|
||||
"authentication/v1beta1",
|
||||
"authorization/v1",
|
||||
"authorization/v1beta1",
|
||||
"autoscaling/v1",
|
||||
"autoscaling/v2beta1",
|
||||
"autoscaling/v2beta2",
|
||||
"batch/v1",
|
||||
"batch/v1beta1",
|
||||
"batch/v2alpha1",
|
||||
"certificates/v1beta1",
|
||||
"coordination/v1",
|
||||
"coordination/v1beta1",
|
||||
"core/v1",
|
||||
"events/v1beta1",
|
||||
"extensions/v1beta1",
|
||||
"networking/v1",
|
||||
"networking/v1beta1",
|
||||
"node/v1alpha1",
|
||||
"node/v1beta1",
|
||||
"policy/v1beta1",
|
||||
"rbac/v1",
|
||||
"rbac/v1alpha1",
|
||||
"rbac/v1beta1",
|
||||
"scheduling/v1",
|
||||
"scheduling/v1alpha1",
|
||||
"scheduling/v1beta1",
|
||||
"settings/v1alpha1",
|
||||
|
@ -1963,11 +1978,11 @@
|
|||
"storage/v1beta1",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e86510ea3fe79b17eda7b8b3bb5cf8811d3af968"
|
||||
version = "kubernetes-1.11.7"
|
||||
revision = "b59d8169aab578b59c8d331a0e8cf6c56fae5fa0"
|
||||
version = "kubernetes-1.15.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1fa62171c3cf8abf9933ec87c1a00881f8f086a5928b2a8481c469ab67cb4104"
|
||||
digest = "1:155388665cbca8ef2eea33692deacce31f5ee2029a1e5c50a83908e4e2fafc4f"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = [
|
||||
"pkg/api/errors",
|
||||
|
@ -1999,6 +2014,7 @@
|
|||
"pkg/util/intstr",
|
||||
"pkg/util/json",
|
||||
"pkg/util/mergepatch",
|
||||
"pkg/util/naming",
|
||||
"pkg/util/net",
|
||||
"pkg/util/runtime",
|
||||
"pkg/util/sets",
|
||||
|
@ -2013,32 +2029,37 @@
|
|||
"third_party/forked/golang/reflect",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "1525e4dadd2df06debdeb27ee24ac7db5b6413b1"
|
||||
version = "kubernetes-1.11.7"
|
||||
revision = "1799e75a07195de9460b8ef7300883499f12127b"
|
||||
version = "kubernetes-1.15.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b557d722202c63ce865634b8adc42b7d283b7d71bd52c7becf525abc04b8bed9"
|
||||
digest = "1:dca55f4df9d801b3ed52459f9df1d4def74e5a5c55a3f976c614bd972913a56d"
|
||||
name = "k8s.io/client-go"
|
||||
packages = [
|
||||
"discovery",
|
||||
"discovery/fake",
|
||||
"informers",
|
||||
"informers/admissionregistration",
|
||||
"informers/admissionregistration/v1alpha1",
|
||||
"informers/admissionregistration/v1beta1",
|
||||
"informers/apps",
|
||||
"informers/apps/v1",
|
||||
"informers/apps/v1beta1",
|
||||
"informers/apps/v1beta2",
|
||||
"informers/auditregistration",
|
||||
"informers/auditregistration/v1alpha1",
|
||||
"informers/autoscaling",
|
||||
"informers/autoscaling/v1",
|
||||
"informers/autoscaling/v2beta1",
|
||||
"informers/autoscaling/v2beta2",
|
||||
"informers/batch",
|
||||
"informers/batch/v1",
|
||||
"informers/batch/v1beta1",
|
||||
"informers/batch/v2alpha1",
|
||||
"informers/certificates",
|
||||
"informers/certificates/v1beta1",
|
||||
"informers/coordination",
|
||||
"informers/coordination/v1",
|
||||
"informers/coordination/v1beta1",
|
||||
"informers/core",
|
||||
"informers/core/v1",
|
||||
"informers/events",
|
||||
|
@ -2048,6 +2069,10 @@
|
|||
"informers/internalinterfaces",
|
||||
"informers/networking",
|
||||
"informers/networking/v1",
|
||||
"informers/networking/v1beta1",
|
||||
"informers/node",
|
||||
"informers/node/v1alpha1",
|
||||
"informers/node/v1beta1",
|
||||
"informers/policy",
|
||||
"informers/policy/v1beta1",
|
||||
"informers/rbac",
|
||||
|
@ -2055,6 +2080,7 @@
|
|||
"informers/rbac/v1alpha1",
|
||||
"informers/rbac/v1beta1",
|
||||
"informers/scheduling",
|
||||
"informers/scheduling/v1",
|
||||
"informers/scheduling/v1alpha1",
|
||||
"informers/scheduling/v1beta1",
|
||||
"informers/settings",
|
||||
|
@ -2065,54 +2091,68 @@
|
|||
"informers/storage/v1beta1",
|
||||
"kubernetes",
|
||||
"kubernetes/scheme",
|
||||
"kubernetes/typed/admissionregistration/v1alpha1",
|
||||
"kubernetes/typed/admissionregistration/v1beta1",
|
||||
"kubernetes/typed/apps/v1",
|
||||
"kubernetes/typed/apps/v1beta1",
|
||||
"kubernetes/typed/apps/v1beta2",
|
||||
"kubernetes/typed/auditregistration/v1alpha1",
|
||||
"kubernetes/typed/authentication/v1",
|
||||
"kubernetes/typed/authentication/v1beta1",
|
||||
"kubernetes/typed/authorization/v1",
|
||||
"kubernetes/typed/authorization/v1beta1",
|
||||
"kubernetes/typed/autoscaling/v1",
|
||||
"kubernetes/typed/autoscaling/v2beta1",
|
||||
"kubernetes/typed/autoscaling/v2beta2",
|
||||
"kubernetes/typed/batch/v1",
|
||||
"kubernetes/typed/batch/v1beta1",
|
||||
"kubernetes/typed/batch/v2alpha1",
|
||||
"kubernetes/typed/certificates/v1beta1",
|
||||
"kubernetes/typed/coordination/v1",
|
||||
"kubernetes/typed/coordination/v1beta1",
|
||||
"kubernetes/typed/core/v1",
|
||||
"kubernetes/typed/events/v1beta1",
|
||||
"kubernetes/typed/extensions/v1beta1",
|
||||
"kubernetes/typed/networking/v1",
|
||||
"kubernetes/typed/networking/v1beta1",
|
||||
"kubernetes/typed/node/v1alpha1",
|
||||
"kubernetes/typed/node/v1beta1",
|
||||
"kubernetes/typed/policy/v1beta1",
|
||||
"kubernetes/typed/rbac/v1",
|
||||
"kubernetes/typed/rbac/v1alpha1",
|
||||
"kubernetes/typed/rbac/v1beta1",
|
||||
"kubernetes/typed/scheduling/v1",
|
||||
"kubernetes/typed/scheduling/v1alpha1",
|
||||
"kubernetes/typed/scheduling/v1beta1",
|
||||
"kubernetes/typed/settings/v1alpha1",
|
||||
"kubernetes/typed/storage/v1",
|
||||
"kubernetes/typed/storage/v1alpha1",
|
||||
"kubernetes/typed/storage/v1beta1",
|
||||
"listers/admissionregistration/v1alpha1",
|
||||
"listers/admissionregistration/v1beta1",
|
||||
"listers/apps/v1",
|
||||
"listers/apps/v1beta1",
|
||||
"listers/apps/v1beta2",
|
||||
"listers/auditregistration/v1alpha1",
|
||||
"listers/autoscaling/v1",
|
||||
"listers/autoscaling/v2beta1",
|
||||
"listers/autoscaling/v2beta2",
|
||||
"listers/batch/v1",
|
||||
"listers/batch/v1beta1",
|
||||
"listers/batch/v2alpha1",
|
||||
"listers/certificates/v1beta1",
|
||||
"listers/coordination/v1",
|
||||
"listers/coordination/v1beta1",
|
||||
"listers/core/v1",
|
||||
"listers/events/v1beta1",
|
||||
"listers/extensions/v1beta1",
|
||||
"listers/networking/v1",
|
||||
"listers/networking/v1beta1",
|
||||
"listers/node/v1alpha1",
|
||||
"listers/node/v1beta1",
|
||||
"listers/policy/v1beta1",
|
||||
"listers/rbac/v1",
|
||||
"listers/rbac/v1alpha1",
|
||||
"listers/rbac/v1beta1",
|
||||
"listers/scheduling/v1",
|
||||
"listers/scheduling/v1alpha1",
|
||||
"listers/scheduling/v1beta1",
|
||||
"listers/settings/v1alpha1",
|
||||
|
@ -2137,20 +2177,19 @@
|
|||
"tools/pager",
|
||||
"tools/reference",
|
||||
"transport",
|
||||
"util/buffer",
|
||||
"util/cert",
|
||||
"util/connrotation",
|
||||
"util/flowcontrol",
|
||||
"util/homedir",
|
||||
"util/integer",
|
||||
"util/keyutil",
|
||||
"util/retry",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
|
||||
version = "v8.0.0"
|
||||
revision = "8e956561bbf57253b1d19c449d0f24e8cb18d467"
|
||||
version = "kubernetes-1.15.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7320216e50843bfc1092d20b535a20683abc967e0eba38441c85c3265da10a34"
|
||||
digest = "1:f41480fd8c5f54b13326ee0f2ee398d5734789b790dbc4db57f9b08a0d63139a"
|
||||
name = "k8s.io/code-generator"
|
||||
packages = [
|
||||
"cmd/client-gen",
|
||||
|
@ -2171,11 +2210,12 @@
|
|||
"cmd/lister-gen",
|
||||
"cmd/lister-gen/args",
|
||||
"cmd/lister-gen/generators",
|
||||
"pkg/namer",
|
||||
"pkg/util",
|
||||
]
|
||||
pruneopts = "T"
|
||||
revision = "f8cba74510f397bac80157a6c4ccb0ffbc31b9d0"
|
||||
version = "kubernetes-1.11.7"
|
||||
revision = "18da4a14b22b17d2fa761e50037fabfbacec225b"
|
||||
version = "kubernetes-1.15.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
|
@ -2210,6 +2250,26 @@
|
|||
pruneopts = "NUT"
|
||||
revision = "15615b16d372105f0c69ff47dfe7402926a65aaa"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2d3f59daa4b479ff4e100a2e1d8fea6780040fdadc177869531fe4cc29407f55"
|
||||
name = "k8s.io/utils"
|
||||
packages = [
|
||||
"buffer",
|
||||
"integer",
|
||||
"trace",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "581e00157fb1a0435d4fac54a52d1ca1e481d60e"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
|
||||
name = "sigs.k8s.io/yaml"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
|
||||
version = "v1.1.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
|
@ -2320,6 +2380,7 @@
|
|||
"k8s.io/apimachinery/pkg/runtime/serializer",
|
||||
"k8s.io/apimachinery/pkg/types",
|
||||
"k8s.io/apimachinery/pkg/util/intstr",
|
||||
"k8s.io/apimachinery/pkg/util/runtime",
|
||||
"k8s.io/apimachinery/pkg/watch",
|
||||
"k8s.io/client-go/discovery",
|
||||
"k8s.io/client-go/discovery/fake",
|
||||
|
Gopkg.toml: 27 lines changed
@@ -203,19 +203,19 @@ required = [

[[constraint]]
  name = "k8s.io/client-go"
  version = "8.0.0" # 8.0.0
  version = "kubernetes-1.15.1" # kubernetes-1.15.1

[[constraint]]
  name = "k8s.io/code-generator"
  version = "kubernetes-1.11.7"
  version = "kubernetes-1.15.1" # "kubernetes-1.15.1"

[[constraint]]
  name = "k8s.io/api"
  version = "kubernetes-1.11.7" # "kubernetes-1.11.7"
  version = "kubernetes-1.15.1" # "kubernetes-1.15.1"

[[constraint]]
  name = "k8s.io/apimachinery"
  version = "kubernetes-1.11.7" # "kubernetes-1.11.7"
  version = "kubernetes-1.15.1" # "kubernetes-1.15.1"

[[override]]
  name = "github.com/json-iterator/go"

@@ -235,28 +235,29 @@ required = [

[[constraint]]
  name = "github.com/docker/docker"
  revision = "7848b8beb9d38a98a78b75f78e05f8d2255f9dfe"
  source = "github.com/docker/engine"
  version = "v19.03.1"

[[override]]
  name = "github.com/docker/docker"
  revision = "7848b8beb9d38a98a78b75f78e05f8d2255f9dfe"
  source = "github.com/docker/engine"
  version = "v19.03.1"

[[override]]
  name = "github.com/docker/cli"
  revision = "6b63d7b96a41055baddc3fa71f381c7f60bd5d8e"
  version = "v19.03.1"

[[override]]
  name = "github.com/docker/distribution"
  revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
  version = "v2.7.1"

[[override]]
  branch = "master"
  name = "github.com/docker/libcompose"

[[override]]
  name = "github.com/Nvveen/Gotty"
  revision = "a8b993ba6abdb0e0c12b0125c603323a71c7790c"
  source = "github.com/ijc25/Gotty"
  name = "github.com/xeipuuv/gojsonschema"
  version = "v1.1.0"

[[override]]
  # ALWAYS keep this override

@@ -282,3 +283,7 @@ required = [

[[constraint]]
  name = "github.com/ExpediaDotCom/haystack-client-go"
  version = "0.2.3"

[[constraint]]
  name = "gopkg.in/yaml.v2"
  version = "v2.2.2"
@@ -134,7 +134,7 @@ func taskState(state swarm.TaskState) func(*swarm.TaskStatus) {

func taskContainerStatus(id string) func(*swarm.TaskStatus) {
    return func(status *swarm.TaskStatus) {
        status.ContainerStatus = swarm.ContainerStatus{
        status.ContainerStatus = &swarm.ContainerStatus{
            ContainerID: id,
        }
    }
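Note: in the Docker 19.03 API types, swarm.TaskStatus.ContainerStatus becomes a pointer, which is why the test helper above now takes its address. A minimal caller-side sketch, assuming the vendored github.com/docker/docker/api/types/swarm package; the nil check is the part the pointer change forces:

package main

import (
    "fmt"

    "github.com/docker/docker/api/types/swarm"
)

// containerID reads the container ID defensively: ContainerStatus may be nil
// now that it is a pointer field.
func containerID(status swarm.TaskStatus) string {
    if status.ContainerStatus == nil {
        return ""
    }
    return status.ContainerStatus.ContainerID
}

func main() {
    s := swarm.TaskStatus{ContainerStatus: &swarm.ContainerStatus{ContainerID: "abc123"}}
    fmt.Println(containerID(s))
}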
@@ -36,8 +36,6 @@ import (

type Interface interface {
    Discovery() discovery.DiscoveryInterface
    TraefikV1alpha1() traefikv1alpha1.TraefikV1alpha1Interface
    // Deprecated: please explicitly pick a version if possible.
    Traefik() traefikv1alpha1.TraefikV1alpha1Interface
}

// Clientset contains the clients for groups. Each group has exactly one

@@ -52,12 +50,6 @@ func (c *Clientset) TraefikV1alpha1() traefikv1alpha1.TraefikV1alpha1Interface {
    return c.traefikV1alpha1
}

// Deprecated: Traefik retrieves the default version of TraefikClient.
// Please explicitly pick a version.
func (c *Clientset) Traefik() traefikv1alpha1.TraefikV1alpha1Interface {
    return c.traefikV1alpha1
}

// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
    if c == nil {
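Note: the deprecated unversioned Traefik() accessor is dropped from the generated Interface and survives only on the concrete Clientset. A sketch of the caller-side pattern, assuming the generated package paths from this repository and the pre-context List signature shown below:

package example

import (
    versioned "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned"
    "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listRoutes relies only on the versioned accessor, which is all the trimmed
// Interface now promises.
func listRoutes(client versioned.Interface, ns string) (*v1alpha1.IngressRouteList, error) {
    return client.TraefikV1alpha1().IngressRoutes(ns).List(metav1.ListOptions{})
}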
@@ -49,7 +49,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
        }
    }

    cs := &Clientset{}
    cs := &Clientset{tracker: o}
    cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
    cs.AddReactor("*", "*", testing.ObjectReaction(o))
    cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {

@@ -71,20 +71,20 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
type Clientset struct {
    testing.Fake
    discovery *fakediscovery.FakeDiscovery
    tracker   testing.ObjectTracker
}

func (c *Clientset) Discovery() discovery.DiscoveryInterface {
    return c.discovery
}

func (c *Clientset) Tracker() testing.ObjectTracker {
    return c.tracker
}

var _ clientset.Interface = &Clientset{}

// TraefikV1alpha1 retrieves the TraefikV1alpha1Client
func (c *Clientset) TraefikV1alpha1() traefikv1alpha1.TraefikV1alpha1Interface {
    return &faketraefikv1alpha1.FakeTraefikV1alpha1{Fake: &c.Fake}
}

// Traefik retrieves the TraefikV1alpha1Client
func (c *Clientset) Traefik() traefikv1alpha1.TraefikV1alpha1Interface {
    return &faketraefikv1alpha1.FakeTraefikV1alpha1{Fake: &c.Fake}
}
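Note: the regenerated fake clientset now keeps a reference to its testing.ObjectTracker and exposes it through Tracker(), matching client-go 1.15's generated fakes. A test-style sketch; the package paths, SchemeGroupVersion, and the "ingressroutes" resource name are taken from this repo's generated code, so treat the exact calls as an illustration:

package main

import (
    "fmt"

    "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/fake"
    "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // Seed the fake with an object; the tracker now backs the default reactors.
    cs := fake.NewSimpleClientset(&v1alpha1.IngressRoute{
        ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
    })

    // Tracker() exposes the object store directly, which is handy for test
    // assertions that previously had to go through the typed client.
    gvr := v1alpha1.SchemeGroupVersion.WithResource("ingressroutes")
    obj, err := cs.Tracker().Get(gvr, "default", "example")
    fmt.Println(obj != nil, err)
}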
@@ -32,15 +32,14 @@ import (
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)

func init() {
    v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
    AddToScheme(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
    traefikv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition

@@ -53,10 +52,13 @@ func init() {
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    traefikv1alpha1.AddToScheme(scheme)
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
    v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
    utilruntime.Must(AddToScheme(scheme))
}
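Note: the regenerated scheme package follows the client-go 1.15 layout: registration goes through a runtime.SchemeBuilder, AddToScheme now returns an error, and init() wraps it in utilruntime.Must instead of ignoring failures. A consumer-side sketch, with import paths assumed from this repository:

package main

import (
    traefikscheme "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/scheme"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
    // Compose the Traefik CRD types into a caller-owned scheme. Must panics on
    // a registration error, which the old void AddToScheme silently swallowed.
    own := runtime.NewScheme()
    utilruntime.Must(traefikscheme.AddToScheme(own))
}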
@@ -32,15 +32,14 @@ import (
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)

func init() {
    v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
    AddToScheme(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
    traefikv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition

@@ -53,10 +52,13 @@ func init() {
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
    traefikv1alpha1.AddToScheme(scheme)
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
    v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
    utilruntime.Must(AddToScheme(Scheme))
}
@@ -127,7 +127,7 @@ func (c *FakeIngressRoutes) DeleteCollection(options *v1.DeleteOptions, listOpti

// Patch applies the patch and returns the patched ingressRoute.
func (c *FakeIngressRoutes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.IngressRoute, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(ingressroutesResource, c.ns, name, data, subresources...), &v1alpha1.IngressRoute{})
        Invokes(testing.NewPatchSubresourceAction(ingressroutesResource, c.ns, name, pt, data, subresources...), &v1alpha1.IngressRoute{})

    if obj == nil {
        return nil, err
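Note: the regenerated fake Patch methods now forward the types.PatchType argument into the recorded action, since client-go 1.15's testing.NewPatchSubresourceAction gained a patch-type parameter. A sketch of how a test might exercise it; whether the default object tracker accepts this particular patch depends on the reactors in play, so the error is printed rather than assumed nil, and the package paths are taken from this repo's generated code:

package main

import (
    "fmt"

    "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/fake"
    "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
)

func main() {
    cs := fake.NewSimpleClientset(&v1alpha1.IngressRoute{
        ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
    })

    // The patch type travels with the action, so reactors and assertions can
    // tell a merge patch from a JSON patch.
    patched, err := cs.TraefikV1alpha1().IngressRoutes("default").
        Patch("example", types.MergePatchType, []byte(`{"metadata":{"labels":{"app":"demo"}}}`))
    fmt.Println(patched != nil, err)
}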
@@ -127,7 +127,7 @@ func (c *FakeIngressRouteTCPs) DeleteCollection(options *v1.DeleteOptions, listO

// Patch applies the patch and returns the patched ingressRouteTCP.
func (c *FakeIngressRouteTCPs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.IngressRouteTCP, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(ingressroutetcpsResource, c.ns, name, data, subresources...), &v1alpha1.IngressRouteTCP{})
        Invokes(testing.NewPatchSubresourceAction(ingressroutetcpsResource, c.ns, name, pt, data, subresources...), &v1alpha1.IngressRouteTCP{})

    if obj == nil {
        return nil, err
@@ -127,7 +127,7 @@ func (c *FakeMiddlewares) DeleteCollection(options *v1.DeleteOptions, listOption

// Patch applies the patch and returns the patched middleware.
func (c *FakeMiddlewares) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Middleware, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(middlewaresResource, c.ns, name, data, subresources...), &v1alpha1.Middleware{})
        Invokes(testing.NewPatchSubresourceAction(middlewaresResource, c.ns, name, pt, data, subresources...), &v1alpha1.Middleware{})

    if obj == nil {
        return nil, err
@@ -127,7 +127,7 @@ func (c *FakeTLSOptions) DeleteCollection(options *v1.DeleteOptions, listOptions

// Patch applies the patch and returns the patched tLSOption.
func (c *FakeTLSOptions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.TLSOption, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(tlsoptionsResource, c.ns, name, data, subresources...), &v1alpha1.TLSOption{})
        Invokes(testing.NewPatchSubresourceAction(tlsoptionsResource, c.ns, name, pt, data, subresources...), &v1alpha1.TLSOption{})

    if obj == nil {
        return nil, err
@@ -27,6 +27,8 @@ THE SOFTWARE.
package v1alpha1

import (
    "time"

    scheme "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/scheme"
    v1alpha1 "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -83,11 +85,16 @@ func (c *ingressRoutes) Get(name string, options v1.GetOptions) (result *v1alpha

// List takes label and field selectors, and returns the list of IngressRoutes that match those selectors.
func (c *ingressRoutes) List(opts v1.ListOptions) (result *v1alpha1.IngressRouteList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1alpha1.IngressRouteList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("ingressroutes").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return

@@ -95,11 +102,16 @@ func (c *ingressRoutes) List(opts v1.ListOptions) (result *v1alpha1.IngressRoute

// Watch returns a watch.Interface that watches the requested ingressRoutes.
func (c *ingressRoutes) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("ingressroutes").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}

@@ -141,10 +153,15 @@ func (c *ingressRoutes) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *ingressRoutes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("ingressroutes").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
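Note: the regenerated typed clients now translate ListOptions.TimeoutSeconds into a per-request Timeout(...) on the REST call, whereas the old generated code ignored the field. A small helper sketch:

package example

import (
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// withTimeout returns ListOptions that the regenerated List/Watch/DeleteCollection
// methods turn into a client-side request timeout.
func withTimeout(d time.Duration) metav1.ListOptions {
    seconds := int64(d / time.Second)
    return metav1.ListOptions{TimeoutSeconds: &seconds}
}

Usage would look like client.TraefikV1alpha1().IngressRoutes(ns).List(withTimeout(30 * time.Second)), with the typed client from this repo's generated package.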
@@ -27,6 +27,8 @@ THE SOFTWARE.
package v1alpha1

import (
    "time"

    scheme "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/scheme"
    v1alpha1 "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -83,11 +85,16 @@ func (c *ingressRouteTCPs) Get(name string, options v1.GetOptions) (result *v1al

// List takes label and field selectors, and returns the list of IngressRouteTCPs that match those selectors.
func (c *ingressRouteTCPs) List(opts v1.ListOptions) (result *v1alpha1.IngressRouteTCPList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1alpha1.IngressRouteTCPList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("ingressroutetcps").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return

@@ -95,11 +102,16 @@ func (c *ingressRouteTCPs) List(opts v1.ListOptions) (result *v1alpha1.IngressRo

// Watch returns a watch.Interface that watches the requested ingressRouteTCPs.
func (c *ingressRouteTCPs) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("ingressroutetcps").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}

@@ -141,10 +153,15 @@ func (c *ingressRouteTCPs) Delete(name string, options *v1.DeleteOptions) error

// DeleteCollection deletes a collection of objects.
func (c *ingressRouteTCPs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("ingressroutetcps").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -27,6 +27,8 @@ THE SOFTWARE.
package v1alpha1

import (
    "time"

    scheme "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/scheme"
    v1alpha1 "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -83,11 +85,16 @@ func (c *middlewares) Get(name string, options v1.GetOptions) (result *v1alpha1.

// List takes label and field selectors, and returns the list of Middlewares that match those selectors.
func (c *middlewares) List(opts v1.ListOptions) (result *v1alpha1.MiddlewareList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1alpha1.MiddlewareList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("middlewares").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return

@@ -95,11 +102,16 @@ func (c *middlewares) List(opts v1.ListOptions) (result *v1alpha1.MiddlewareList

// Watch returns a watch.Interface that watches the requested middlewares.
func (c *middlewares) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("middlewares").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}

@@ -141,10 +153,15 @@ func (c *middlewares) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *middlewares) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("middlewares").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -27,6 +27,8 @@ THE SOFTWARE.
package v1alpha1

import (
    "time"

    scheme "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/scheme"
    v1alpha1 "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -83,11 +85,16 @@ func (c *tLSOptions) Get(name string, options v1.GetOptions) (result *v1alpha1.T

// List takes label and field selectors, and returns the list of TLSOptions that match those selectors.
func (c *tLSOptions) List(opts v1.ListOptions) (result *v1alpha1.TLSOptionList, err error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1alpha1.TLSOptionList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("tlsoptions").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do().
        Into(result)
    return

@@ -95,11 +102,16 @@ func (c *tLSOptions) List(opts v1.ListOptions) (result *v1alpha1.TLSOptionList,

// Watch returns a watch.Interface that watches the requested tLSOptions.
func (c *tLSOptions) Watch(opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("tlsoptions").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch()
}

@@ -141,10 +153,15 @@ func (c *tLSOptions) Delete(name string, options *v1.DeleteOptions) error {

// DeleteCollection deletes a collection of objects.
func (c *tLSOptions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    var timeout time.Duration
    if listOptions.TimeoutSeconds != nil {
        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("tlsoptions").
        VersionedParams(&listOptions, scheme.ParameterCodec).
        Timeout(timeout).
        Body(options).
        Do().
        Error()
@@ -29,7 +29,6 @@ package v1alpha1
import (
    "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/scheme"
    v1alpha1 "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
    rest "k8s.io/client-go/rest"
)

@@ -94,7 +93,7 @@ func setConfigDefaults(config *rest.Config) error {
    gv := v1alpha1.SchemeGroupVersion
    config.GroupVersion = &gv
    config.APIPath = "/apis"
    config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
    config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()

    if config.UserAgent == "" {
        config.UserAgent = rest.DefaultKubernetesUserAgent()
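Note: serializer.DirectCodecFactory was removed from apimachinery around 1.15, so the generated client now derives its NegotiatedSerializer from the codec factory's WithoutConversion(). A caller-side sketch of building a REST client the same way; the identifiers mirror the hunk above and rest.RESTClientFor is standard client-go, but treat this as an illustration rather than the repo's code:

package example

import (
    "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned/scheme"
    "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
    "k8s.io/client-go/rest"
)

// restClientFor configures a rest.Config for the Traefik CRD group and builds
// a REST client from it, using the new WithoutConversion() serializer.
func restClientFor(config *rest.Config) (*rest.RESTClient, error) {
    gv := v1alpha1.SchemeGroupVersion
    config.GroupVersion = &gv
    config.APIPath = "/apis"
    config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
    if config.UserAgent == "" {
        config.UserAgent = rest.DefaultKubernetesUserAgent()
    }
    return rest.RESTClientFor(config)
}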
@@ -35,6 +35,7 @@ import (
    cache "k8s.io/client-go/tools/cache"
)

// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer

// SharedInformerFactory a small interface to allow for adding an informer without an import cycle

@@ -43,4 +44,5 @@ type SharedInformerFactory interface {
    InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}

// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)
@@ -4,7 +4,6 @@ import (
    "context"
    "crypto/sha256"
    "errors"
    "flag"
    "fmt"
    "os"
    "reflect"

@@ -83,14 +82,6 @@ func (p *Provider) Init() error {
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
    ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetescrd"))
    logger := log.FromContext(ctxLog)
    // Tell glog (used by client-go) to log into STDERR. Otherwise, we risk
    // certain kinds of API errors getting logged into a directory not
    // available in a `FROM scratch` Docker container, causing glog to abort
    // hard with an exit code > 0.
    err := flag.Set("logtostderr", "true")
    if err != nil {
        return err
    }

    logger.Debugf("Using label selector: %q", p.LabelSelector)
    k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
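Note: the removed block configured glog, but client-go 1.15 logs through klog, so the provider no longer needs the flag workaround. If stderr-only logging were still wanted, klog could be configured explicitly; a hedged sketch assuming the k8s.io/klog v1 API used by client-go 1.15:

package main

import (
    "flag"

    "k8s.io/klog"
)

func main() {
    // Register klog's flags on a private FlagSet and force stderr logging,
    // roughly what the old glog workaround achieved.
    fs := flag.NewFlagSet("klog", flag.ContinueOnError)
    klog.InitFlags(fs)
    _ = fs.Set("logtostderr", "true")
}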
@@ -107,7 +107,7 @@ func (in *IngressRoute) DeepCopyObject() runtime.Object {
func (in *IngressRouteList) DeepCopyInto(out *IngressRouteList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]IngressRoute, len(*in))

@@ -200,7 +200,7 @@ func (in *IngressRouteTCP) DeepCopyObject() runtime.Object {
func (in *IngressRouteTCPList) DeepCopyInto(out *IngressRouteTCPList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]IngressRouteTCP, len(*in))

@@ -293,7 +293,7 @@ func (in *Middleware) DeepCopyObject() runtime.Object {
func (in *MiddlewareList) DeepCopyInto(out *MiddlewareList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Middleware, len(*in))

@@ -476,7 +476,7 @@ func (in *TLSOption) DeepCopyObject() runtime.Object {
func (in *TLSOptionList) DeepCopyInto(out *TLSOptionList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]TLSOption, len(*in))
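Note: metav1.ListMeta gained pointer fields around Kubernetes 1.15 (RemainingItemCount, assuming the 1.15 API types), so the regenerated deepcopy calls ListMeta.DeepCopyInto instead of a plain struct assignment, keeping copies independent. A quick illustration using the generated DeepCopy; the field name is the assumption here:

package main

import (
    "fmt"

    "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1"
)

func main() {
    remaining := int64(3)
    orig := &v1alpha1.IngressRouteList{}
    orig.ListMeta.RemainingItemCount = &remaining

    clone := orig.DeepCopy()
    *clone.ListMeta.RemainingItemCount = 0

    // With a deep copy of ListMeta, mutating the clone leaves the original at 3.
    fmt.Println(*orig.ListMeta.RemainingItemCount)
}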
@@ -3,7 +3,6 @@ package ingress
import (
    "context"
    "errors"
    "flag"
    "fmt"
    "math"
    "os"

@@ -95,14 +94,6 @@ func (p *Provider) Init() error {
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
    ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetes"))
    logger := log.FromContext(ctxLog)
    // Tell glog (used by client-go) to log into STDERR. Otherwise, we risk
    // certain kinds of API errors getting logged into a directory not
    // available in a `FROM scratch` Docker container, causing glog to abort
    // hard with an exit code > 0.
    err := flag.Set("logtostderr", "true")
    if err != nil {
        return err
    }

    logger.Debugf("Using Ingress label selector: %q", p.LabelSelector)
    k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
@@ -1,4 +1,6 @@
Copyright (c) 2011-2012 Peter Bourgon
The MIT License (MIT)

Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -7,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/Microsoft/hcsshim/cmd/runhcs/LICENSE (generated, vendored, new file): 191 lines

@@ -0,0 +1,191 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright 2014 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
22 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE generated vendored Normal file
@@ -0,0 +1,22 @@
runhcs is a fork of runc.

The following is runc's legal notice.

---

runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
51 vendor/github.com/Microsoft/hcsshim/osversion/osversion.go generated vendored Normal file
@@ -0,0 +1,51 @@
package osversion

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// OSVersion is a wrapper for Windows version information
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
type OSVersion struct {
	Version      uint32
	MajorVersion uint8
	MinorVersion uint8
	Build        uint16
}

// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
type osVersionInfoEx struct {
	OSVersionInfoSize uint32
	MajorVersion      uint32
	MinorVersion      uint32
	BuildNumber       uint32
	PlatformID        uint32
	CSDVersion        [128]uint16
	ServicePackMajor  uint16
	ServicePackMinor  uint16
	SuiteMask         uint16
	ProductType       byte
	Reserve           byte
}

// Get gets the operating system version on Windows.
// The calling application must be manifested to get the correct version information.
func Get() OSVersion {
	var err error
	osv := OSVersion{}
	osv.Version, err = windows.GetVersion()
	if err != nil {
		// GetVersion never fails.
		panic(err)
	}
	osv.MajorVersion = uint8(osv.Version & 0xFF)
	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
	osv.Build = uint16(osv.Version >> 16)
	return osv
}

func (osv OSVersion) ToString() string {
	return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
}
10 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go generated vendored Normal file
@@ -0,0 +1,10 @@
package osversion

const (

	// RS2 was a client-only release in case you're asking why it's not in the list.
	RS1 = 14393
	RS3 = 16299
	RS4 = 17134
	RS5 = 17763
)
201 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
22 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE generated vendored Normal file
@@ -0,0 +1,22 @@
go-runhcs is a fork of go-runc

The following is runc's legal notice.

---

runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
26 vendor/github.com/Nvveen/Gotty/LICENSE generated vendored
@@ -1,26 +0,0 @@
Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
514 vendor/github.com/Nvveen/Gotty/attributes.go generated vendored
@@ -1,514 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
|
||||
// Usage of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package gotty
|
||||
|
||||
// Boolean capabilities
|
||||
var BoolAttr = [...]string{
|
||||
"auto_left_margin", "bw",
|
||||
"auto_right_margin", "am",
|
||||
"no_esc_ctlc", "xsb",
|
||||
"ceol_standout_glitch", "xhp",
|
||||
"eat_newline_glitch", "xenl",
|
||||
"erase_overstrike", "eo",
|
||||
"generic_type", "gn",
|
||||
"hard_copy", "hc",
|
||||
"has_meta_key", "km",
|
||||
"has_status_line", "hs",
|
||||
"insert_null_glitch", "in",
|
||||
"memory_above", "da",
|
||||
"memory_below", "db",
|
||||
"move_insert_mode", "mir",
|
||||
"move_standout_mode", "msgr",
|
||||
"over_strike", "os",
|
||||
"status_line_esc_ok", "eslok",
|
||||
"dest_tabs_magic_smso", "xt",
|
||||
"tilde_glitch", "hz",
|
||||
"transparent_underline", "ul",
|
||||
"xon_xoff", "nxon",
|
||||
"needs_xon_xoff", "nxon",
|
||||
"prtr_silent", "mc5i",
|
||||
"hard_cursor", "chts",
|
||||
"non_rev_rmcup", "nrrmc",
|
||||
"no_pad_char", "npc",
|
||||
"non_dest_scroll_region", "ndscr",
|
||||
"can_change", "ccc",
|
||||
"back_color_erase", "bce",
|
||||
"hue_lightness_saturation", "hls",
|
||||
"col_addr_glitch", "xhpa",
|
||||
"cr_cancels_micro_mode", "crxm",
|
||||
"has_print_wheel", "daisy",
|
||||
"row_addr_glitch", "xvpa",
|
||||
"semi_auto_right_margin", "sam",
|
||||
"cpi_changes_res", "cpix",
|
||||
"lpi_changes_res", "lpix",
|
||||
"backspaces_with_bs", "",
|
||||
"crt_no_scrolling", "",
|
||||
"no_correctly_working_cr", "",
|
||||
"gnu_has_meta_key", "",
|
||||
"linefeed_is_newline", "",
|
||||
"has_hardware_tabs", "",
|
||||
"return_does_clr_eol", "",
|
||||
}
|
||||
|
||||
// Numerical capabilities
|
||||
var NumAttr = [...]string{
|
||||
"columns", "cols",
|
||||
"init_tabs", "it",
|
||||
"lines", "lines",
|
||||
"lines_of_memory", "lm",
|
||||
"magic_cookie_glitch", "xmc",
|
||||
"padding_baud_rate", "pb",
|
||||
"virtual_terminal", "vt",
|
||||
"width_status_line", "wsl",
|
||||
"num_labels", "nlab",
|
||||
"label_height", "lh",
|
||||
"label_width", "lw",
|
||||
"max_attributes", "ma",
|
||||
"maximum_windows", "wnum",
|
||||
"max_colors", "colors",
|
||||
"max_pairs", "pairs",
|
||||
"no_color_video", "ncv",
|
||||
"buffer_capacity", "bufsz",
|
||||
"dot_vert_spacing", "spinv",
|
||||
"dot_horz_spacing", "spinh",
|
||||
"max_micro_address", "maddr",
|
||||
"max_micro_jump", "mjump",
|
||||
"micro_col_size", "mcs",
|
||||
"micro_line_size", "mls",
|
||||
"number_of_pins", "npins",
|
||||
"output_res_char", "orc",
|
||||
"output_res_line", "orl",
|
||||
"output_res_horz_inch", "orhi",
|
||||
"output_res_vert_inch", "orvi",
|
||||
"print_rate", "cps",
|
||||
"wide_char_size", "widcs",
|
||||
"buttons", "btns",
|
||||
"bit_image_entwining", "bitwin",
|
||||
"bit_image_type", "bitype",
|
||||
"magic_cookie_glitch_ul", "",
|
||||
"carriage_return_delay", "",
|
||||
"new_line_delay", "",
|
||||
"backspace_delay", "",
|
||||
"horizontal_tab_delay", "",
|
||||
"number_of_function_keys", "",
|
||||
}
|
||||
|
||||
// String capabilities
|
||||
var StrAttr = [...]string{
|
||||
"back_tab", "cbt",
|
||||
"bell", "bel",
|
||||
"carriage_return", "cr",
|
||||
"change_scroll_region", "csr",
|
||||
"clear_all_tabs", "tbc",
|
||||
"clear_screen", "clear",
|
||||
"clr_eol", "el",
|
||||
"clr_eos", "ed",
|
||||
"column_address", "hpa",
|
||||
"command_character", "cmdch",
|
||||
"cursor_address", "cup",
|
||||
"cursor_down", "cud1",
|
||||
"cursor_home", "home",
|
||||
"cursor_invisible", "civis",
|
||||
"cursor_left", "cub1",
|
||||
"cursor_mem_address", "mrcup",
|
||||
"cursor_normal", "cnorm",
|
||||
"cursor_right", "cuf1",
|
||||
"cursor_to_ll", "ll",
|
||||
"cursor_up", "cuu1",
|
||||
"cursor_visible", "cvvis",
|
||||
"delete_character", "dch1",
|
||||
"delete_line", "dl1",
|
||||
"dis_status_line", "dsl",
|
||||
"down_half_line", "hd",
|
||||
"enter_alt_charset_mode", "smacs",
|
||||
"enter_blink_mode", "blink",
|
||||
"enter_bold_mode", "bold",
|
||||
"enter_ca_mode", "smcup",
|
||||
"enter_delete_mode", "smdc",
|
||||
"enter_dim_mode", "dim",
|
||||
"enter_insert_mode", "smir",
|
||||
"enter_secure_mode", "invis",
|
||||
"enter_protected_mode", "prot",
|
||||
"enter_reverse_mode", "rev",
|
||||
"enter_standout_mode", "smso",
|
||||
"enter_underline_mode", "smul",
|
||||
"erase_chars", "ech",
|
||||
"exit_alt_charset_mode", "rmacs",
|
||||
"exit_attribute_mode", "sgr0",
|
||||
"exit_ca_mode", "rmcup",
|
||||
"exit_delete_mode", "rmdc",
|
||||
"exit_insert_mode", "rmir",
|
||||
"exit_standout_mode", "rmso",
|
||||
"exit_underline_mode", "rmul",
|
||||
"flash_screen", "flash",
|
||||
"form_feed", "ff",
|
||||
"from_status_line", "fsl",
|
||||
"init_1string", "is1",
|
||||
"init_2string", "is2",
|
||||
"init_3string", "is3",
|
||||
"init_file", "if",
|
||||
"insert_character", "ich1",
|
||||
"insert_line", "il1",
|
||||
"insert_padding", "ip",
|
||||
"key_backspace", "kbs",
|
||||
"key_catab", "ktbc",
|
||||
"key_clear", "kclr",
|
||||
"key_ctab", "kctab",
|
||||
"key_dc", "kdch1",
|
||||
"key_dl", "kdl1",
|
||||
"key_down", "kcud1",
|
||||
"key_eic", "krmir",
|
||||
"key_eol", "kel",
|
||||
"key_eos", "ked",
|
||||
"key_f0", "kf0",
|
||||
"key_f1", "kf1",
|
||||
"key_f10", "kf10",
|
||||
"key_f2", "kf2",
|
||||
"key_f3", "kf3",
|
||||
"key_f4", "kf4",
|
||||
"key_f5", "kf5",
|
||||
"key_f6", "kf6",
|
||||
"key_f7", "kf7",
|
||||
"key_f8", "kf8",
|
||||
"key_f9", "kf9",
|
||||
"key_home", "khome",
|
||||
"key_ic", "kich1",
|
||||
"key_il", "kil1",
|
||||
"key_left", "kcub1",
|
||||
"key_ll", "kll",
|
||||
"key_npage", "knp",
|
||||
"key_ppage", "kpp",
|
||||
"key_right", "kcuf1",
|
||||
"key_sf", "kind",
|
||||
"key_sr", "kri",
|
||||
"key_stab", "khts",
|
||||
"key_up", "kcuu1",
|
||||
"keypad_local", "rmkx",
|
||||
"keypad_xmit", "smkx",
|
||||
"lab_f0", "lf0",
|
||||
"lab_f1", "lf1",
|
||||
"lab_f10", "lf10",
|
||||
"lab_f2", "lf2",
|
||||
"lab_f3", "lf3",
|
||||
"lab_f4", "lf4",
|
||||
"lab_f5", "lf5",
|
||||
"lab_f6", "lf6",
|
||||
"lab_f7", "lf7",
|
||||
"lab_f8", "lf8",
|
||||
"lab_f9", "lf9",
|
||||
"meta_off", "rmm",
|
||||
"meta_on", "smm",
|
||||
"newline", "_glitch",
|
||||
"pad_char", "npc",
|
||||
"parm_dch", "dch",
|
||||
"parm_delete_line", "dl",
|
||||
"parm_down_cursor", "cud",
|
||||
"parm_ich", "ich",
|
||||
"parm_index", "indn",
|
||||
"parm_insert_line", "il",
|
||||
"parm_left_cursor", "cub",
|
||||
"parm_right_cursor", "cuf",
|
||||
"parm_rindex", "rin",
|
||||
"parm_up_cursor", "cuu",
|
||||
"pkey_key", "pfkey",
|
||||
"pkey_local", "pfloc",
|
||||
"pkey_xmit", "pfx",
|
||||
"print_screen", "mc0",
|
||||
"prtr_off", "mc4",
|
||||
"prtr_on", "mc5",
|
||||
"repeat_char", "rep",
|
||||
"reset_1string", "rs1",
|
||||
"reset_2string", "rs2",
|
||||
"reset_3string", "rs3",
|
||||
"reset_file", "rf",
|
||||
"restore_cursor", "rc",
|
||||
"row_address", "mvpa",
|
||||
"save_cursor", "row_address",
|
||||
"scroll_forward", "ind",
|
||||
"scroll_reverse", "ri",
|
||||
"set_attributes", "sgr",
|
||||
"set_tab", "hts",
|
||||
"set_window", "wind",
|
||||
"tab", "s_magic_smso",
|
||||
"to_status_line", "tsl",
|
||||
"underline_char", "uc",
|
||||
"up_half_line", "hu",
|
||||
"init_prog", "iprog",
|
||||
"key_a1", "ka1",
|
||||
"key_a3", "ka3",
|
||||
"key_b2", "kb2",
|
||||
"key_c1", "kc1",
|
||||
"key_c3", "kc3",
|
||||
"prtr_non", "mc5p",
|
||||
"char_padding", "rmp",
|
||||
"acs_chars", "acsc",
|
||||
"plab_norm", "pln",
|
||||
"key_btab", "kcbt",
|
||||
"enter_xon_mode", "smxon",
|
||||
"exit_xon_mode", "rmxon",
|
||||
"enter_am_mode", "smam",
|
||||
"exit_am_mode", "rmam",
|
||||
"xon_character", "xonc",
|
||||
"xoff_character", "xoffc",
|
||||
"ena_acs", "enacs",
|
||||
"label_on", "smln",
|
||||
"label_off", "rmln",
|
||||
"key_beg", "kbeg",
|
||||
"key_cancel", "kcan",
|
||||
"key_close", "kclo",
|
||||
"key_command", "kcmd",
|
||||
"key_copy", "kcpy",
|
||||
"key_create", "kcrt",
|
||||
"key_end", "kend",
|
||||
"key_enter", "kent",
|
||||
"key_exit", "kext",
|
||||
"key_find", "kfnd",
|
||||
"key_help", "khlp",
|
||||
"key_mark", "kmrk",
|
||||
"key_message", "kmsg",
|
||||
"key_move", "kmov",
|
||||
"key_next", "knxt",
|
||||
"key_open", "kopn",
|
||||
"key_options", "kopt",
|
||||
"key_previous", "kprv",
|
||||
"key_print", "kprt",
|
||||
"key_redo", "krdo",
|
||||
"key_reference", "kref",
|
||||
"key_refresh", "krfr",
|
||||
"key_replace", "krpl",
|
||||
"key_restart", "krst",
|
||||
"key_resume", "kres",
|
||||
"key_save", "ksav",
|
||||
"key_suspend", "kspd",
|
||||
"key_undo", "kund",
|
||||
"key_sbeg", "kBEG",
|
||||
"key_scancel", "kCAN",
|
||||
"key_scommand", "kCMD",
|
||||
"key_scopy", "kCPY",
|
||||
"key_screate", "kCRT",
|
||||
"key_sdc", "kDC",
|
||||
"key_sdl", "kDL",
|
||||
"key_select", "kslt",
|
||||
"key_send", "kEND",
|
||||
"key_seol", "kEOL",
|
||||
"key_sexit", "kEXT",
|
||||
"key_sfind", "kFND",
|
||||
"key_shelp", "kHLP",
|
||||
"key_shome", "kHOM",
|
||||
"key_sic", "kIC",
|
||||
"key_sleft", "kLFT",
|
||||
"key_smessage", "kMSG",
|
||||
"key_smove", "kMOV",
|
||||
"key_snext", "kNXT",
|
||||
"key_soptions", "kOPT",
|
||||
"key_sprevious", "kPRV",
|
||||
"key_sprint", "kPRT",
|
||||
"key_sredo", "kRDO",
|
||||
"key_sreplace", "kRPL",
|
||||
"key_sright", "kRIT",
|
||||
"key_srsume", "kRES",
|
||||
"key_ssave", "kSAV",
|
||||
"key_ssuspend", "kSPD",
|
||||
"key_sundo", "kUND",
|
||||
"req_for_input", "rfi",
|
||||
"key_f11", "kf11",
|
||||
"key_f12", "kf12",
|
||||
"key_f13", "kf13",
|
||||
"key_f14", "kf14",
|
||||
"key_f15", "kf15",
|
||||
"key_f16", "kf16",
|
||||
"key_f17", "kf17",
|
||||
"key_f18", "kf18",
|
||||
"key_f19", "kf19",
|
||||
"key_f20", "kf20",
|
||||
"key_f21", "kf21",
|
||||
"key_f22", "kf22",
|
||||
"key_f23", "kf23",
|
||||
"key_f24", "kf24",
|
||||
"key_f25", "kf25",
|
||||
"key_f26", "kf26",
|
||||
"key_f27", "kf27",
|
||||
"key_f28", "kf28",
|
||||
"key_f29", "kf29",
|
||||
"key_f30", "kf30",
|
||||
"key_f31", "kf31",
|
||||
"key_f32", "kf32",
|
||||
"key_f33", "kf33",
|
||||
"key_f34", "kf34",
|
||||
"key_f35", "kf35",
|
||||
"key_f36", "kf36",
|
||||
"key_f37", "kf37",
|
||||
"key_f38", "kf38",
|
||||
"key_f39", "kf39",
|
||||
"key_f40", "kf40",
|
||||
"key_f41", "kf41",
|
||||
"key_f42", "kf42",
|
||||
"key_f43", "kf43",
|
||||
"key_f44", "kf44",
|
||||
"key_f45", "kf45",
|
||||
"key_f46", "kf46",
|
||||
"key_f47", "kf47",
|
||||
"key_f48", "kf48",
|
||||
"key_f49", "kf49",
|
||||
"key_f50", "kf50",
|
||||
"key_f51", "kf51",
|
||||
"key_f52", "kf52",
|
||||
"key_f53", "kf53",
|
||||
"key_f54", "kf54",
|
||||
"key_f55", "kf55",
|
||||
"key_f56", "kf56",
|
||||
"key_f57", "kf57",
|
||||
"key_f58", "kf58",
|
||||
"key_f59", "kf59",
|
||||
"key_f60", "kf60",
|
||||
"key_f61", "kf61",
|
||||
"key_f62", "kf62",
|
||||
"key_f63", "kf63",
|
||||
"clr_bol", "el1",
|
||||
"clear_margins", "mgc",
|
||||
"set_left_margin", "smgl",
|
||||
"set_right_margin", "smgr",
|
||||
"label_format", "fln",
|
||||
"set_clock", "sclk",
|
||||
"display_clock", "dclk",
|
||||
"remove_clock", "rmclk",
|
||||
"create_window", "cwin",
|
||||
"goto_window", "wingo",
|
||||
"hangup", "hup",
|
||||
"dial_phone", "dial",
|
||||
"quick_dial", "qdial",
|
||||
"tone", "tone",
|
||||
"pulse", "pulse",
|
||||
"flash_hook", "hook",
|
||||
"fixed_pause", "pause",
|
||||
"wait_tone", "wait",
|
||||
"user0", "u0",
|
||||
"user1", "u1",
|
||||
"user2", "u2",
|
||||
"user3", "u3",
|
||||
"user4", "u4",
|
||||
"user5", "u5",
|
||||
"user6", "u6",
|
||||
"user7", "u7",
|
||||
"user8", "u8",
|
||||
"user9", "u9",
|
||||
"orig_pair", "op",
|
||||
"orig_colors", "oc",
|
||||
"initialize_color", "initc",
|
||||
"initialize_pair", "initp",
|
||||
"set_color_pair", "scp",
|
||||
"set_foreground", "setf",
|
||||
"set_background", "setb",
|
||||
"change_char_pitch", "cpi",
|
||||
"change_line_pitch", "lpi",
|
||||
"change_res_horz", "chr",
|
||||
"change_res_vert", "cvr",
|
||||
"define_char", "defc",
|
||||
"enter_doublewide_mode", "swidm",
|
||||
"enter_draft_quality", "sdrfq",
|
||||
"enter_italics_mode", "sitm",
|
||||
"enter_leftward_mode", "slm",
|
||||
"enter_micro_mode", "smicm",
|
||||
"enter_near_letter_quality", "snlq",
|
||||
"enter_normal_quality", "snrmq",
|
||||
"enter_shadow_mode", "sshm",
|
||||
"enter_subscript_mode", "ssubm",
|
||||
"enter_superscript_mode", "ssupm",
|
||||
"enter_upward_mode", "sum",
|
||||
"exit_doublewide_mode", "rwidm",
|
||||
"exit_italics_mode", "ritm",
|
||||
"exit_leftward_mode", "rlm",
|
||||
"exit_micro_mode", "rmicm",
|
||||
"exit_shadow_mode", "rshm",
|
||||
"exit_subscript_mode", "rsubm",
|
||||
"exit_superscript_mode", "rsupm",
|
||||
"exit_upward_mode", "rum",
|
||||
"micro_column_address", "mhpa",
|
||||
"micro_down", "mcud1",
|
||||
"micro_left", "mcub1",
|
||||
"micro_right", "mcuf1",
|
||||
"micro_row_address", "mvpa",
|
||||
"micro_up", "mcuu1",
|
||||
"order_of_pins", "porder",
|
||||
"parm_down_micro", "mcud",
|
||||
"parm_left_micro", "mcub",
|
||||
"parm_right_micro", "mcuf",
|
||||
"parm_up_micro", "mcuu",
|
||||
"select_char_set", "scs",
|
||||
"set_bottom_margin", "smgb",
|
||||
"set_bottom_margin_parm", "smgbp",
|
||||
"set_left_margin_parm", "smglp",
|
||||
"set_right_margin_parm", "smgrp",
|
||||
"set_top_margin", "smgt",
|
||||
"set_top_margin_parm", "smgtp",
|
||||
"start_bit_image", "sbim",
|
||||
"start_char_set_def", "scsd",
|
||||
"stop_bit_image", "rbim",
|
||||
"stop_char_set_def", "rcsd",
|
||||
"subscript_characters", "subcs",
|
||||
"superscript_characters", "supcs",
|
||||
"these_cause_cr", "docr",
|
||||
"zero_motion", "zerom",
|
||||
"char_set_names", "csnm",
|
||||
"key_mouse", "kmous",
|
||||
"mouse_info", "minfo",
|
||||
"req_mouse_pos", "reqmp",
|
||||
"get_mouse", "getm",
|
||||
"set_a_foreground", "setaf",
|
||||
"set_a_background", "setab",
|
||||
"pkey_plab", "pfxl",
|
||||
"device_type", "devt",
|
||||
"code_set_init", "csin",
|
||||
"set0_des_seq", "s0ds",
|
||||
"set1_des_seq", "s1ds",
|
||||
"set2_des_seq", "s2ds",
|
||||
"set3_des_seq", "s3ds",
|
||||
"set_lr_margin", "smglr",
|
||||
"set_tb_margin", "smgtb",
|
||||
"bit_image_repeat", "birep",
|
||||
"bit_image_newline", "binel",
|
||||
"bit_image_carriage_return", "bicr",
|
||||
"color_names", "colornm",
|
||||
"define_bit_image_region", "defbi",
|
||||
"end_bit_image_region", "endbi",
|
||||
"set_color_band", "setcolor",
|
||||
"set_page_length", "slines",
|
||||
"display_pc_char", "dispc",
|
||||
"enter_pc_charset_mode", "smpch",
|
||||
"exit_pc_charset_mode", "rmpch",
|
||||
"enter_scancode_mode", "smsc",
|
||||
"exit_scancode_mode", "rmsc",
|
||||
"pc_term_options", "pctrm",
|
||||
"scancode_escape", "scesc",
|
||||
"alt_scancode_esc", "scesa",
|
||||
"enter_horizontal_hl_mode", "ehhlm",
|
||||
"enter_left_hl_mode", "elhlm",
|
||||
"enter_low_hl_mode", "elohlm",
|
||||
"enter_right_hl_mode", "erhlm",
|
||||
"enter_top_hl_mode", "ethlm",
|
||||
"enter_vertical_hl_mode", "evhlm",
|
||||
"set_a_attributes", "sgr1",
|
||||
"set_pglen_inch", "slength",
|
||||
"termcap_init2", "",
|
||||
"termcap_reset", "",
|
||||
"linefeed_if_not_lf", "",
|
||||
"backspace_if_not_bs", "",
|
||||
"other_non_function_keys", "",
|
||||
"arrow_key_map", "",
|
||||
"acs_ulcorner", "",
|
||||
"acs_llcorner", "",
|
||||
"acs_urcorner", "",
|
||||
"acs_lrcorner", "",
|
||||
"acs_ltee", "",
|
||||
"acs_rtee", "",
|
||||
"acs_btee", "",
|
||||
"acs_ttee", "",
|
||||
"acs_hline", "",
|
||||
"acs_vline", "",
|
||||
"acs_plus", "",
|
||||
"memory_lock", "",
|
||||
"memory_unlock", "",
|
||||
"box_chars_1", "",
|
||||
}
244 vendor/github.com/Nvveen/Gotty/gotty.go generated vendored
@@ -1,244 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
|
||||
// Usage of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Gotty is a Go-package for reading and parsing the terminfo database
|
||||
package gotty
|
||||
|
||||
// TODO add more concurrency to name lookup, look for more opportunities.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Open a terminfo file by the name given and construct a TermInfo object.
|
||||
// If something went wrong reading the terminfo database file, an error is
|
||||
// returned.
|
||||
func OpenTermInfo(termName string) (*TermInfo, error) {
|
||||
if len(termName) == 0 {
|
||||
return nil, errors.New("No termname given")
|
||||
}
|
||||
// Find the environment variables
|
||||
if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 {
|
||||
return readTermInfo(path.Join(termloc, string(termName[0]), termName))
|
||||
} else {
|
||||
// Search like ncurses
|
||||
locations := []string{}
|
||||
if h := os.Getenv("HOME"); len(h) > 0 {
|
||||
locations = append(locations, path.Join(h, ".terminfo"))
|
||||
}
|
||||
locations = append(locations,
|
||||
"/etc/terminfo/",
|
||||
"/lib/terminfo/",
|
||||
"/usr/share/terminfo/")
|
||||
for _, str := range locations {
|
||||
term, err := readTermInfo(path.Join(str, string(termName[0]), termName))
|
||||
if err == nil {
|
||||
return term, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("No terminfo file(-location) found")
|
||||
}
|
||||
}
|
||||
|
||||
// Open a terminfo file from the environment variable containing the current
|
||||
// terminal name and construct a TermInfo object. If something went wrong
|
||||
// reading the terminfo database file, an error is returned.
|
||||
func OpenTermInfoEnv() (*TermInfo, error) {
|
||||
termenv := os.Getenv("TERM")
|
||||
return OpenTermInfo(termenv)
|
||||
}
|
||||
|
||||
// Return an attribute by the name attr provided. If none can be found,
|
||||
// an error is returned.
|
||||
func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
|
||||
// Channel to store the main value in.
|
||||
var value stacker
|
||||
// Add a blocking WaitGroup
|
||||
var block sync.WaitGroup
|
||||
// Keep track of variable being written.
|
||||
written := false
|
||||
// Function to put into goroutine.
|
||||
f := func(ats interface{}) {
|
||||
var ok bool
|
||||
var v stacker
|
||||
// Switch on type of map to use and assign value to it.
|
||||
switch reflect.TypeOf(ats).Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
v, ok = ats.(map[string]bool)[attr]
|
||||
case reflect.Int16:
|
||||
v, ok = ats.(map[string]int16)[attr]
|
||||
case reflect.String:
|
||||
v, ok = ats.(map[string]string)[attr]
|
||||
}
|
||||
// If ok, a value is found, so we can write.
|
||||
if ok {
|
||||
value = v
|
||||
written = true
|
||||
}
|
||||
// Goroutine is done
|
||||
block.Done()
|
||||
}
|
||||
block.Add(3)
|
||||
// Go for all 3 attribute lists.
|
||||
go f(term.boolAttributes)
|
||||
go f(term.numAttributes)
|
||||
go f(term.strAttributes)
|
||||
// Wait until every goroutine is done.
|
||||
block.Wait()
|
||||
// If a value has been written, return it.
|
||||
if written {
|
||||
return value, nil
|
||||
}
|
||||
// Otherwise, error.
|
||||
return nil, fmt.Errorf("Erorr finding attribute")
|
||||
}
|
||||
|
||||
// Return an attribute by the name attr provided. If none can be found,
|
||||
// an error is returned. A name is first converted to its termcap value.
|
||||
func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
|
||||
tc := GetTermcapName(name)
|
||||
return term.GetAttribute(tc)
|
||||
}
|
||||
|
||||
// A utility function that finds and returns the termcap equivalent of a
|
||||
// variable name.
|
||||
func GetTermcapName(name string) string {
|
||||
// Termcap name
|
||||
var tc string
|
||||
// Blocking group
|
||||
var wait sync.WaitGroup
|
||||
// Function to put into a goroutine
|
||||
f := func(attrs []string) {
|
||||
// Find the string corresponding to the name
|
||||
for i, s := range attrs {
|
||||
if s == name {
|
||||
tc = attrs[i+1]
|
||||
}
|
||||
}
|
||||
// Goroutine is finished
|
||||
wait.Done()
|
||||
}
|
||||
wait.Add(3)
|
||||
// Go for all 3 attribute lists
|
||||
go f(BoolAttr[:])
|
||||
go f(NumAttr[:])
|
||||
go f(StrAttr[:])
|
||||
// Wait until every goroutine is done
|
||||
wait.Wait()
|
||||
// Return the termcap name
|
||||
return tc
|
||||
}
|
||||
|
||||
// This function takes a path to a terminfo file and reads it in binary
|
||||
// form to construct the actual TermInfo file.
|
||||
func readTermInfo(path string) (*TermInfo, error) {
|
||||
// Open the terminfo file
|
||||
file, err := os.Open(path)
|
||||
defer file.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
|
||||
// Header is composed of the magic 0432 octal number, size of the name
|
||||
// section, size of the boolean section, the amount of number values,
|
||||
// the number of offsets of strings, and the size of the string section.
|
||||
var header [6]int16
|
||||
// Byte array is used to read in byte values
|
||||
var byteArray []byte
|
||||
// Short array is used to read in short values
|
||||
var shArray []int16
|
||||
// TermInfo object to store values
|
||||
var term TermInfo
|
||||
|
||||
// Read in the header
|
||||
err = binary.Read(file, binary.LittleEndian, &header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If magic number isn't there or isn't correct, we have the wrong filetype
|
||||
if header[0] != 0432 {
|
||||
return nil, errors.New(fmt.Sprintf("Wrong filetype"))
|
||||
}
|
||||
|
||||
// Read in the names
|
||||
byteArray = make([]byte, header[1])
|
||||
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
term.Names = strings.Split(string(byteArray), "|")
|
||||
|
||||
// Read in the booleans
|
||||
byteArray = make([]byte, header[2])
|
||||
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
term.boolAttributes = make(map[string]bool)
|
||||
for i, b := range byteArray {
|
||||
if b == 1 {
|
||||
term.boolAttributes[BoolAttr[i*2+1]] = true
|
||||
}
|
||||
}
|
||||
// If the number of bytes read is not even, a byte for alignment is added
|
||||
// We know the header is an even number of bytes so only need to check the
|
||||
// total of the names and booleans.
|
||||
if (header[1]+header[2])%2 != 0 {
|
||||
err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Read in shorts
|
||||
shArray = make([]int16, header[3])
|
||||
err = binary.Read(file, binary.LittleEndian, &shArray)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
term.numAttributes = make(map[string]int16)
|
||||
for i, n := range shArray {
|
||||
if n != 0377 && n > -1 {
|
||||
term.numAttributes[NumAttr[i*2+1]] = n
|
||||
}
|
||||
}
|
||||
|
||||
// Read the offsets into the short array
|
||||
shArray = make([]int16, header[4])
|
||||
err = binary.Read(file, binary.LittleEndian, &shArray)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Read the actual strings in the byte array
|
||||
byteArray = make([]byte, header[5])
|
||||
err = binary.Read(file, binary.LittleEndian, &byteArray)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
term.strAttributes = make(map[string]string)
|
||||
// We get an offset, and then iterate until the string is null-terminated
|
||||
for i, offset := range shArray {
|
||||
if offset > -1 {
|
||||
if int(offset) >= len(byteArray) {
|
||||
return nil, errors.New("array out of bounds reading string section")
|
||||
}
|
||||
r := bytes.IndexByte(byteArray[offset:], 0)
|
||||
if r == -1 {
|
||||
return nil, errors.New("missing nul byte reading string section")
|
||||
}
|
||||
r += int(offset)
|
||||
term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
|
||||
}
|
||||
}
|
||||
return &term, nil
|
||||
}
362 vendor/github.com/Nvveen/Gotty/parser.go generated vendored
@@ -1,362 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
|
||||
// Usage of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package gotty
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var exp = [...]string{
|
||||
"%%",
|
||||
"%c",
|
||||
"%s",
|
||||
"%p(\\d)",
|
||||
"%P([A-z])",
|
||||
"%g([A-z])",
|
||||
"%'(.)'",
|
||||
"%{([0-9]+)}",
|
||||
"%l",
|
||||
"%\\+|%-|%\\*|%/|%m",
|
||||
"%&|%\\||%\\^",
|
||||
"%=|%>|%<",
|
||||
"%A|%O",
|
||||
"%!|%~",
|
||||
"%i",
|
||||
"%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
|
||||
"%\\?(.*?);",
|
||||
}
|
||||
|
||||
var regex *regexp.Regexp
|
||||
var staticVar map[byte]stacker
|
||||
|
||||
// Parses the attribute that is received with name attr and parameters params.
|
||||
func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
|
||||
// Get the attribute name first.
|
||||
iface, err := term.GetAttribute(attr)
|
||||
str, ok := iface.(string)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !ok {
|
||||
return str, errors.New("Only string capabilities can be parsed.")
|
||||
}
|
||||
// Construct the hidden parser struct so we can use a recursive stack based
|
||||
// parser.
|
||||
ps := &parser{}
|
||||
// Dynamic variables only exist in this context.
|
||||
ps.dynamicVar = make(map[byte]stacker, 26)
|
||||
ps.parameters = make([]stacker, len(params))
|
||||
// Convert the parameters to insert them into the parser struct.
|
||||
for i, x := range params {
|
||||
ps.parameters[i] = x
|
||||
}
|
||||
// Recursively walk and return.
|
||||
result, err := ps.walk(str)
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Parses the attribute that is received with name attr and parameters params.
|
||||
// Only works on full name of a capability that is given, which it uses to
|
||||
// search for the termcap name.
|
||||
func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
|
||||
tc := GetTermcapName(attr)
|
||||
return term.Parse(tc, params)
|
||||
}
|
||||
|
||||
// Identify each token in a stack based manner and do the actual parsing.
|
||||
func (ps *parser) walk(attr string) (string, error) {
|
||||
// We use a buffer to get the modified string.
|
||||
var buf bytes.Buffer
|
||||
// Next, find and identify all tokens by their indices and strings.
|
||||
tokens := regex.FindAllStringSubmatch(attr, -1)
|
||||
if len(tokens) == 0 {
|
||||
return attr, nil
|
||||
}
|
||||
indices := regex.FindAllStringIndex(attr, -1)
|
||||
q := 0 // q counts the matches of one token
|
||||
// Iterate through the string per character.
|
||||
for i := 0; i < len(attr); i++ {
|
||||
// If the current position is an identified token, execute the following
|
||||
// steps.
|
||||
if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
|
||||
// Switch on token.
|
||||
switch {
|
||||
case tokens[q][0][:2] == "%%":
|
||||
// Literal percentage character.
|
||||
buf.WriteByte('%')
|
||||
case tokens[q][0][:2] == "%c":
|
||||
// Pop a character.
|
||||
c, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
buf.WriteByte(c.(byte))
|
||||
case tokens[q][0][:2] == "%s":
|
||||
// Pop a string.
|
||||
str, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
if _, ok := str.(string); !ok {
|
||||
return buf.String(), errors.New("Stack head is not a string")
|
||||
}
|
||||
buf.WriteString(str.(string))
|
||||
case tokens[q][0][:2] == "%p":
|
||||
// Push a parameter on the stack.
|
||||
index, err := strconv.ParseInt(tokens[q][1], 10, 8)
|
||||
index--
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
if int(index) >= len(ps.parameters) {
|
||||
return buf.String(), errors.New("Parameters index out of bound")
|
||||
}
|
||||
ps.st.push(ps.parameters[index])
|
||||
case tokens[q][0][:2] == "%P":
|
||||
// Pop a variable from the stack as a dynamic or static variable.
|
||||
val, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
index := tokens[q][2]
|
||||
if len(index) > 1 {
|
||||
errorStr := fmt.Sprintf("%s is not a valid dynamic variables index",
|
||||
index)
|
||||
return buf.String(), errors.New(errorStr)
|
||||
}
|
||||
// Specify either dynamic or static.
|
||||
if index[0] >= 'a' && index[0] <= 'z' {
|
||||
ps.dynamicVar[index[0]] = val
|
||||
} else if index[0] >= 'A' && index[0] <= 'Z' {
|
||||
staticVar[index[0]] = val
|
||||
}
|
||||
case tokens[q][0][:2] == "%g":
|
||||
// Push a variable from the stack as a dynamic or static variable.
|
||||
index := tokens[q][3]
|
||||
if len(index) > 1 {
|
||||
errorStr := fmt.Sprintf("%s is not a valid static variables index",
|
||||
index)
|
||||
return buf.String(), errors.New(errorStr)
|
||||
}
|
||||
var val stacker
|
||||
if index[0] >= 'a' && index[0] <= 'z' {
|
||||
val = ps.dynamicVar[index[0]]
|
||||
} else if index[0] >= 'A' && index[0] <= 'Z' {
|
||||
val = staticVar[index[0]]
|
||||
}
|
||||
ps.st.push(val)
|
||||
case tokens[q][0][:2] == "%'":
|
||||
// Push a character constant.
|
||||
con := tokens[q][4]
|
||||
if len(con) > 1 {
|
||||
errorStr := fmt.Sprintf("%s is not a valid character constant", con)
|
||||
return buf.String(), errors.New(errorStr)
|
||||
}
|
||||
ps.st.push(con[0])
|
||||
case tokens[q][0][:2] == "%{":
|
||||
// Push an integer constant.
|
||||
con, err := strconv.ParseInt(tokens[q][5], 10, 32)
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
ps.st.push(con)
|
||||
case tokens[q][0][:2] == "%l":
|
||||
// Push the length of the string that is popped from the stack.
|
||||
popStr, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
if _, ok := popStr.(string); !ok {
|
||||
errStr := fmt.Sprintf("Stack head is not a string")
|
||||
return buf.String(), errors.New(errStr)
|
||||
}
|
||||
ps.st.push(len(popStr.(string)))
|
||||
case tokens[q][0][:2] == "%?":
|
||||
// If-then-else construct. First, the whole string is identified and
|
||||
// then inside this substring, we can specify which parts to switch on.
|
||||
ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);")
|
||||
ifTokens := ifReg.FindStringSubmatch(tokens[q][0])
|
||||
var (
|
||||
ifStr string
|
||||
err error
|
||||
)
|
||||
// Parse the if-part to determine if-else.
|
||||
if len(ifTokens[1]) > 0 {
|
||||
ifStr, err = ps.walk(ifTokens[1])
|
||||
} else { // else
|
||||
ifStr, err = ps.walk(ifTokens[4])
|
||||
}
|
||||
// Return any errors
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
} else if len(ifStr) > 0 {
|
||||
// Self-defined limitation, not sure if this is correct, but didn't
|
||||
// seem like it.
|
||||
return buf.String(), errors.New("If-clause cannot print statements")
|
||||
}
|
||||
var thenStr string
|
||||
// Pop the first value that is set by parsing the if-clause.
|
||||
choose, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
// Switch to if or else.
|
||||
if choose.(int) == 0 && len(ifTokens[1]) > 0 {
|
||||
thenStr, err = ps.walk(ifTokens[3])
|
||||
} else if choose.(int) != 0 {
|
||||
if len(ifTokens[1]) > 0 {
|
||||
thenStr, err = ps.walk(ifTokens[2])
|
||||
} else {
|
||||
thenStr, err = ps.walk(ifTokens[5])
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
buf.WriteString(thenStr)
|
||||
case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing
|
||||
fallthrough
|
||||
case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits.
|
||||
fallthrough
|
||||
case tokens[q][0][len(tokens[q][0])-1] == 'x':
|
||||
fallthrough
|
||||
case tokens[q][0][len(tokens[q][0])-1] == 'X':
|
||||
fallthrough
|
||||
case tokens[q][0][len(tokens[q][0])-1] == 's':
|
||||
token := tokens[q][0]
|
||||
// Remove the : that comes before a flag.
|
||||
if token[1] == ':' {
|
||||
token = token[:1] + token[2:]
|
||||
}
|
||||
digit, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
// The rest is determined like the normal formatted prints.
|
||||
digitStr := fmt.Sprintf(token, digit.(int))
|
||||
buf.WriteString(digitStr)
|
||||
case tokens[q][0][:2] == "%i":
|
||||
// Increment the parameters by one.
|
||||
if len(ps.parameters) < 2 {
|
||||
return buf.String(), errors.New("Not enough parameters to increment.")
|
||||
}
|
||||
val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
|
||||
val1++
|
||||
val2++
|
||||
ps.parameters[0], ps.parameters[1] = val1, val2
|
||||
default:
|
||||
// The rest of the tokens is a special case, where two values are
|
||||
// popped and then operated on by the token that comes after them.
|
||||
op1, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
op2, err := ps.st.pop()
|
||||
if err != nil {
|
||||
return buf.String(), err
|
||||
}
|
||||
var result stacker
|
||||
switch tokens[q][0][:2] {
|
||||
case "%+":
|
||||
// Addition
|
||||
result = op2.(int) + op1.(int)
|
||||
case "%-":
|
||||
// Subtraction
|
||||
result = op2.(int) - op1.(int)
|
||||
case "%*":
|
||||
// Multiplication
|
||||
result = op2.(int) * op1.(int)
|
||||
case "%/":
|
||||
// Division
|
||||
result = op2.(int) / op1.(int)
|
||||
case "%m":
|
||||
// Modulo
|
||||
result = op2.(int) % op1.(int)
|
||||
case "%&":
|
||||
// Bitwise AND
|
||||
result = op2.(int) & op1.(int)
|
||||
case "%|":
|
||||
// Bitwise OR
|
||||
result = op2.(int) | op1.(int)
|
||||
case "%^":
|
||||
// Bitwise XOR
|
||||
result = op2.(int) ^ op1.(int)
|
||||
case "%=":
|
||||
// Equals
|
||||
result = op2 == op1
|
||||
case "%>":
|
||||
// Greater-than
|
||||
result = op2.(int) > op1.(int)
|
||||
case "%<":
|
||||
// Lesser-than
|
||||
result = op2.(int) < op1.(int)
|
||||
case "%A":
|
||||
// Logical AND
|
||||
result = op2.(bool) && op1.(bool)
|
||||
case "%O":
|
||||
// Logical OR
|
||||
result = op2.(bool) || op1.(bool)
|
||||
case "%!":
|
||||
// Logical complement
|
||||
result = !op1.(bool)
|
||||
case "%~":
|
||||
// Bitwise complement
|
||||
result = ^(op1.(int))
|
||||
}
|
||||
ps.st.push(result)
|
||||
}
|
||||
|
||||
i = indices[q][1] - 1
|
||||
q++
|
||||
} else {
|
||||
// We are not "inside" a token, so just skip until the end or the next
|
||||
// token, and add all characters to the buffer.
|
||||
j := i
|
||||
if q != len(indices) {
|
||||
for !(j >= indices[q][0] && j < indices[q][1]) {
|
||||
j++
|
||||
}
|
||||
} else {
|
||||
j = len(attr)
|
||||
}
|
||||
buf.WriteString(string(attr[i:j]))
|
||||
i = j
|
||||
}
|
||||
}
|
||||
// Return the buffer as a string.
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// Push a stacker-value onto the stack.
|
||||
func (st *stack) push(s stacker) {
|
||||
*st = append(*st, s)
|
||||
}
|
||||
|
||||
// Pop a stacker-value from the stack.
|
||||
func (st *stack) pop() (stacker, error) {
|
||||
if len(*st) == 0 {
|
||||
return nil, errors.New("Stack is empty.")
|
||||
}
|
||||
newStack := make(stack, len(*st)-1)
|
||||
val := (*st)[len(*st)-1]
|
||||
copy(newStack, (*st)[:len(*st)-1])
|
||||
*st = newStack
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// Initialize regexes and the static vars (that don't get changed between
|
||||
// calls.
|
||||
func init() {
|
||||
// Initialize the main regex.
|
||||
expStr := strings.Join(exp[:], "|")
|
||||
regex, _ = regexp.Compile(expStr)
|
||||
// Initialize the static variables.
|
||||
staticVar = make(map[byte]stacker, 26)
|
||||
}
23 vendor/github.com/Nvveen/Gotty/types.go generated vendored
@@ -1,23 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
// Usage of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package gotty

type TermInfo struct {
	boolAttributes map[string]bool
	numAttributes  map[string]int16
	strAttributes  map[string]string
	// The various names of the TermInfo file.
	Names []string
}

type stacker interface {
}
type stack []stacker

type parser struct {
	st         stack
	parameters []stacker
	dynamicVar map[byte]stacker
}
19 vendor/github.com/containerd/continuity/LICENSE generated vendored
@@ -1,6 +1,7 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
|
@@ -175,28 +176,16 @@
|
|||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
|
172 vendor/github.com/containerd/continuity/fs/copy.go generated vendored Normal file
@@ -0,0 +1,172 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
buffer := make([]byte, 32*1024)
|
||||
return &buffer
|
||||
},
|
||||
}
|
||||
|
||||
// XAttrErrorHandlers transform a non-nil xattr error.
|
||||
// Return nil to ignore an error.
|
||||
// xattrKey can be empty for listxattr operation.
|
||||
type XAttrErrorHandler func(dst, src, xattrKey string, err error) error
|
||||
|
||||
type copyDirOpts struct {
|
||||
xeh XAttrErrorHandler
|
||||
}
|
||||
|
||||
type CopyDirOpt func(*copyDirOpts) error
|
||||
|
||||
// WithXAttrErrorHandler allows specifying XAttrErrorHandler
|
||||
// If nil XAttrErrorHandler is specified (default), CopyDir stops
|
||||
// on a non-nil xattr error.
|
||||
func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt {
|
||||
return func(o *copyDirOpts) error {
|
||||
o.xeh = xeh
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAllowXAttrErrors allows ignoring xattr errors.
|
||||
func WithAllowXAttrErrors() CopyDirOpt {
|
||||
xeh := func(dst, src, xattrKey string, err error) error {
|
||||
return nil
|
||||
}
|
||||
return WithXAttrErrorHandler(xeh)
|
||||
}
|
||||
|
||||
// CopyDir copies the directory from src to dst.
|
||||
// Most efficient copy of files is attempted.
|
||||
func CopyDir(dst, src string, opts ...CopyDirOpt) error {
|
||||
var o copyDirOpts
|
||||
for _, opt := range opts {
|
||||
if err := opt(&o); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
inodes := map[uint64]string{}
|
||||
return copyDirectory(dst, src, inodes, &o)
|
||||
}
|
||||
|
||||
func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error {
|
||||
stat, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to stat %s", src)
|
||||
}
|
||||
if !stat.IsDir() {
|
||||
return errors.Errorf("source is not directory")
|
||||
}
|
||||
|
||||
if st, err := os.Stat(dst); err != nil {
|
||||
if err := os.Mkdir(dst, stat.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to mkdir %s", dst)
|
||||
}
|
||||
} else if !st.IsDir() {
|
||||
return errors.Errorf("cannot copy to non-directory: %s", dst)
|
||||
} else {
|
||||
if err := os.Chmod(dst, stat.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod on %s", dst)
|
||||
}
|
||||
}
|
||||
|
||||
fis, err := ioutil.ReadDir(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read %s", src)
|
||||
}
|
||||
|
||||
if err := copyFileInfo(stat, dst); err != nil {
|
||||
return errors.Wrapf(err, "failed to copy file info for %s", dst)
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
source := filepath.Join(src, fi.Name())
|
||||
target := filepath.Join(dst, fi.Name())
|
||||
|
||||
switch {
|
||||
case fi.IsDir():
|
||||
if err := copyDirectory(target, source, inodes, o); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
case (fi.Mode() & os.ModeType) == 0:
|
||||
link, err := getLinkSource(target, fi, inodes)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get hardlink")
|
||||
}
|
||||
if link != "" {
|
||||
if err := os.Link(link, target); err != nil {
|
||||
return errors.Wrap(err, "failed to create hard link")
|
||||
}
|
||||
} else if err := CopyFile(target, source); err != nil {
|
||||
return errors.Wrap(err, "failed to copy files")
|
||||
}
|
||||
case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
|
||||
link, err := os.Readlink(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read link: %s", source)
|
||||
}
|
||||
if err := os.Symlink(link, target); err != nil {
|
||||
return errors.Wrapf(err, "failed to create symlink: %s", target)
|
||||
}
|
||||
case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
|
||||
if err := copyDevice(target, fi); err != nil {
|
||||
return errors.Wrapf(err, "failed to create device")
|
||||
}
|
||||
default:
|
||||
// TODO: Support pipes and sockets
|
||||
return errors.Wrapf(err, "unsupported mode %s", fi.Mode())
|
||||
}
|
||||
if err := copyFileInfo(fi, target); err != nil {
|
||||
return errors.Wrap(err, "failed to copy file info")
|
||||
}
|
||||
|
||||
if err := copyXAttrs(target, source, o.xeh); err != nil {
|
||||
return errors.Wrap(err, "failed to copy xattrs")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyFile copies the source file to the target.
|
||||
// The most efficient means of copying is used for the platform.
|
||||
func CopyFile(target, source string) error {
|
||||
src, err := os.Open(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open source %s", source)
|
||||
}
|
||||
defer src.Close()
|
||||
tgt, err := os.Create(target)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open target %s", target)
|
||||
}
|
||||
defer tgt.Close()
|
||||
|
||||
return copyFileContent(tgt, src)
|
||||
}
|
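CopyDir and CopyFile above are the exported entry points of the vendored fs package. A minimal usage sketch follows; the source/destination paths and the option choice are illustrative assumptions, not taken from this repository:

```go
package main

import (
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Copy a whole tree; WithAllowXAttrErrors (defined above) makes xattr
	// failures non-fatal, e.g. on filesystems without xattr support.
	if err := fs.CopyDir("/tmp/dst", "/tmp/src", fs.WithAllowXAttrErrors()); err != nil {
		log.Fatalf("copy dir: %v", err)
	}

	// Copy a single file; the platform-specific copyFileContent picks the
	// most efficient mechanism available on the host.
	if err := fs.CopyFile("/tmp/dst/file.txt", "/tmp/src/file.txt"); err != nil {
		log.Fatalf("copy file: %v", err)
	}
}
```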
144 vendor/github.com/containerd/continuity/fs/copy_linux.go generated vendored Normal file
@@ -0,0 +1,144 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||
st := fi.Sys().(*syscall.Stat_t)
|
||||
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
|
||||
if os.IsPermission(err) {
|
||||
// Normally if uid/gid are the same this would be a no-op, but some
|
||||
// filesystems may still return EPERM... for instance NFS does this.
|
||||
// In such a case, this is not an error.
|
||||
if dstStat, err2 := os.Lstat(name); err2 == nil {
|
||||
st2 := dstStat.Sys().(*syscall.Stat_t)
|
||||
if st.Uid == st2.Uid && st.Gid == st2.Gid {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))}
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const maxSSizeT = int64(^uint(0) >> 1)
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
st, err := src.Stat()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to stat source")
|
||||
}
|
||||
|
||||
size := st.Size()
|
||||
first := true
|
||||
srcFd := int(src.Fd())
|
||||
dstFd := int(dst.Fd())
|
||||
|
||||
for size > 0 {
|
||||
// Ensure that we are never trying to copy more than SSIZE_MAX at a
|
||||
// time and at the same time avoids overflows when the file is larger
|
||||
// than 4GB on 32-bit systems.
|
||||
var copySize int
|
||||
if size > maxSSizeT {
|
||||
copySize = int(maxSSizeT)
|
||||
} else {
|
||||
copySize = int(size)
|
||||
}
|
||||
n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0)
|
||||
if err != nil {
|
||||
if (err != unix.ENOSYS && err != unix.EXDEV) || !first {
|
||||
return errors.Wrap(err, "copy file range failed")
|
||||
}
|
||||
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err = io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
return errors.Wrap(err, "userspace copy failed")
|
||||
}
|
||||
|
||||
first = false
|
||||
size -= int64(n)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
|
||||
xattrKeys, err := sysx.LListxattr(src)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
if xeh != nil {
|
||||
e = xeh(dst, src, "", e)
|
||||
}
|
||||
return e
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
data, err := sysx.LGetxattr(src, xattr)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||
e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDevice(dst string, fi os.FileInfo) error {
|
||||
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
}
|
||||
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
|
||||
}
|
112 vendor/github.com/containerd/continuity/fs/copy_unix.go generated vendored Normal file
@@ -0,0 +1,112 @@
// +build solaris darwin freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||
st := fi.Sys().(*syscall.Stat_t)
|
||||
if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
|
||||
if os.IsPermission(err) {
|
||||
// Normally if uid/gid are the same this would be a no-op, but some
|
||||
// filesystems may still return EPERM... for instance NFS does this.
|
||||
// In such a case, this is not an error.
|
||||
if dstStat, err2 := os.Lstat(name); err2 == nil {
|
||||
st2 := dstStat.Sys().(*syscall.Stat_t)
|
||||
if st.Uid == st2.Uid && st.Gid == st2.Gid {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)}
|
||||
if err := syscall.UtimesNano(name, timespec); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err := io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
|
||||
xattrKeys, err := sysx.LListxattr(src)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
if xeh != nil {
|
||||
e = xeh(dst, src, "", e)
|
||||
}
|
||||
return e
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
data, err := sysx.LGetxattr(src, xattr)
|
||||
if err != nil {
|
||||
e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||
e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
if xeh != nil {
|
||||
if e = xeh(dst, src, xattr, e); e == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDevice(dst string, fi os.FileInfo) error {
|
||||
st, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
}
|
||||
return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
|
||||
}
|
49 vendor/github.com/containerd/continuity/fs/copy_windows.go generated vendored Normal file
@@ -0,0 +1,49 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func copyFileInfo(fi os.FileInfo, name string) error {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
|
||||
// TODO: copy windows specific metadata
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err := io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyDevice(dst string, fi os.FileInfo) error {
|
||||
return errors.New("device copy not supported")
|
||||
}
|
326 vendor/github.com/containerd/continuity/fs/diff.go generated vendored Normal file
@@ -0,0 +1,326 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChangeKind is the type of modification that
|
||||
// a change is making.
|
||||
type ChangeKind int
|
||||
|
||||
const (
|
||||
// ChangeKindUnmodified represents an unmodified
|
||||
// file
|
||||
ChangeKindUnmodified = iota
|
||||
|
||||
// ChangeKindAdd represents an addition of
|
||||
// a file
|
||||
ChangeKindAdd
|
||||
|
||||
// ChangeKindModify represents a change to
|
||||
// an existing file
|
||||
ChangeKindModify
|
||||
|
||||
// ChangeKindDelete represents a delete of
|
||||
// a file
|
||||
ChangeKindDelete
|
||||
)
|
||||
|
||||
func (k ChangeKind) String() string {
|
||||
switch k {
|
||||
case ChangeKindUnmodified:
|
||||
return "unmodified"
|
||||
case ChangeKindAdd:
|
||||
return "add"
|
||||
case ChangeKindModify:
|
||||
return "modify"
|
||||
case ChangeKindDelete:
|
||||
return "delete"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// Change represents single change between a diff and its parent.
|
||||
type Change struct {
|
||||
Kind ChangeKind
|
||||
Path string
|
||||
}
|
||||
|
||||
// ChangeFunc is the type of function called for each change
|
||||
// computed during a directory changes calculation.
|
||||
type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
|
||||
|
||||
// Changes computes changes between two directories calling the
|
||||
// given change function for each computed change. The first
|
||||
// directory is intended to the base directory and second
|
||||
// directory the changed directory.
|
||||
//
|
||||
// The change callback is called by the order of path names and
|
||||
// should be appliable in that order.
|
||||
// Due to this apply ordering, the following is true
|
||||
// - Removed directory trees only create a single change for the root
|
||||
// directory removed. Remaining changes are implied.
|
||||
// - A directory which is modified to become a file will not have
|
||||
// delete entries for sub-path items, their removal is implied
|
||||
// by the removal of the parent directory.
|
||||
//
|
||||
// Opaque directories will not be treated specially and each file
|
||||
// removed from the base directory will show up as a removal.
|
||||
//
|
||||
// File content comparisons will be done on files which have timestamps
|
||||
// which may have been truncated. If either of the files being compared
|
||||
// has a zero value nanosecond value, each byte will be compared for
|
||||
// differences. If 2 files have the same seconds value but different
|
||||
// nanosecond values where one of those values is zero, the files will
|
||||
// be considered unchanged if the content is the same. This behavior
|
||||
// is to account for timestamp truncation during archiving.
|
||||
func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
|
||||
if a == "" {
|
||||
logrus.Debugf("Using single walk diff for %s", b)
|
||||
return addDirChanges(ctx, changeFn, b)
|
||||
} else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
|
||||
logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
|
||||
return diffDirChanges(ctx, changeFn, a, diffOptions)
|
||||
}
|
||||
|
||||
logrus.Debugf("Using double walk diff for %s from %s", b, a)
|
||||
return doubleWalkDiff(ctx, changeFn, a, b)
|
||||
}
|
||||
|
||||
func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
|
||||
return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(root, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return changeFn(ChangeKindAdd, path, f, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// diffDirOptions is used when the diff can be directly calculated from
|
||||
// a diff directory to its base, without walking both trees.
|
||||
type diffDirOptions struct {
|
||||
diffDir string
|
||||
skipChange func(string) (bool, error)
|
||||
deleteChange func(string, string, os.FileInfo) (string, error)
|
||||
}
|
||||
|
||||
// diffDirChanges walks the diff directory and compares changes against the base.
|
||||
func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error {
|
||||
changedDirs := make(map[string]struct{})
|
||||
return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(o.diffDir, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: handle opaqueness, start new double walker at this
|
||||
// location to get deletes, and skip tree in single walker
|
||||
|
||||
if o.skipChange != nil {
|
||||
if skip, err := o.skipChange(path); skip {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var kind ChangeKind
|
||||
|
||||
deletedFile, err := o.deleteChange(o.diffDir, path, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find out what kind of modification happened
|
||||
if deletedFile != "" {
|
||||
path = deletedFile
|
||||
kind = ChangeKindDelete
|
||||
f = nil
|
||||
} else {
|
||||
// Otherwise, the file was added
|
||||
kind = ChangeKindAdd
|
||||
|
||||
// ...Unless it already existed in a base, in which case, it's a modification
|
||||
stat, err := os.Stat(filepath.Join(base, path))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err == nil {
|
||||
// The file existed in the base, so that's a modification
|
||||
|
||||
// However, if it's a directory, maybe it wasn't actually modified.
|
||||
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
||||
if stat.IsDir() && f.IsDir() {
|
||||
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
|
||||
// Both directories are the same, don't record the change
|
||||
return nil
|
||||
}
|
||||
}
|
||||
kind = ChangeKindModify
|
||||
}
|
||||
}
|
||||
|
||||
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
|
||||
// This block is here to ensure the change is recorded even if the
|
||||
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
|
||||
// Check https://github.com/docker/docker/pull/13590 for details.
|
||||
if f.IsDir() {
|
||||
changedDirs[path] = struct{}{}
|
||||
}
|
||||
if kind == ChangeKindAdd || kind == ChangeKindDelete {
|
||||
parent := filepath.Dir(path)
|
||||
if _, ok := changedDirs[parent]; !ok && parent != "/" {
|
||||
pi, err := os.Stat(filepath.Join(o.diffDir, parent))
|
||||
if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
|
||||
return err
|
||||
}
|
||||
changedDirs[parent] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return changeFn(kind, path, f, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// doubleWalkDiff walks both directories to create a diff
|
||||
func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
var (
|
||||
c1 = make(chan *currentPath)
|
||||
c2 = make(chan *currentPath)
|
||||
|
||||
f1, f2 *currentPath
|
||||
rmdir string
|
||||
)
|
||||
g.Go(func() error {
|
||||
defer close(c1)
|
||||
return pathWalk(ctx, a, c1)
|
||||
})
|
||||
g.Go(func() error {
|
||||
defer close(c2)
|
||||
return pathWalk(ctx, b, c2)
|
||||
})
|
||||
g.Go(func() error {
|
||||
for c1 != nil || c2 != nil {
|
||||
if f1 == nil && c1 != nil {
|
||||
f1, err = nextPath(ctx, c1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f1 == nil {
|
||||
c1 = nil
|
||||
}
|
||||
}
|
||||
|
||||
if f2 == nil && c2 != nil {
|
||||
f2, err = nextPath(ctx, c2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f2 == nil {
|
||||
c2 = nil
|
||||
}
|
||||
}
|
||||
if f1 == nil && f2 == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var f os.FileInfo
|
||||
k, p := pathChange(f1, f2)
|
||||
switch k {
|
||||
case ChangeKindAdd:
|
||||
if rmdir != "" {
|
||||
rmdir = ""
|
||||
}
|
||||
f = f2.f
|
||||
f2 = nil
|
||||
case ChangeKindDelete:
|
||||
// Check if this file is already removed by being
|
||||
// under of a removed directory
|
||||
if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
|
||||
f1 = nil
|
||||
continue
|
||||
} else if f1.f.IsDir() {
|
||||
rmdir = f1.path + string(os.PathSeparator)
|
||||
} else if rmdir != "" {
|
||||
rmdir = ""
|
||||
}
|
||||
f1 = nil
|
||||
case ChangeKindModify:
|
||||
same, err := sameFile(f1, f2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f1.f.IsDir() && !f2.f.IsDir() {
|
||||
rmdir = f1.path + string(os.PathSeparator)
|
||||
} else if rmdir != "" {
|
||||
rmdir = ""
|
||||
}
|
||||
f = f2.f
|
||||
f1 = nil
|
||||
f2 = nil
|
||||
if same {
|
||||
if !isLinked(f) {
|
||||
continue
|
||||
}
|
||||
k = ChangeKindUnmodified
|
||||
}
|
||||
}
|
||||
if err := changeFn(k, p, f, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return g.Wait()
|
||||
}
|
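fs.Changes is the diff entry point of this package. A small sketch of driving it with a ChangeFunc that just logs each change; the base/upper directory paths are hypothetical:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/continuity/fs"
)

func main() {
	ctx := context.Background()
	// Compare a base layer against a changed layer (paths are illustrative).
	err := fs.Changes(ctx, "/tmp/base", "/tmp/upper",
		func(kind fs.ChangeKind, path string, fi os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			// kind stringifies via ChangeKind.String: add/modify/delete/unmodified.
			log.Printf("%-10s %s", kind, path)
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
```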
74 vendor/github.com/containerd/continuity/fs/diff_unix.go generated vendored Normal file
@@ -0,0 +1,74 @@
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// detectDirDiff returns diff dir options if a directory could
|
||||
// be found in the mount info for upper which is the direct
|
||||
// diff with the provided lower directory
|
||||
func detectDirDiff(upper, lower string) *diffDirOptions {
|
||||
// TODO: get mount options for upper
|
||||
// TODO: detect AUFS
|
||||
// TODO: detect overlay
|
||||
return nil
|
||||
}
|
||||
|
||||
// compareSysStat returns whether the stats are equivalent,
|
||||
// whether the files are considered the same file, and
|
||||
// an error
|
||||
func compareSysStat(s1, s2 interface{}) (bool, error) {
|
||||
ls1, ok := s1.(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
ls2, ok := s2.(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
|
||||
}
|
||||
|
||||
func compareCapabilities(p1, p2 string) (bool, error) {
|
||||
c1, err := sysx.LGetxattr(p1, "security.capability")
|
||||
if err != nil && err != sysx.ENODATA {
|
||||
return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
|
||||
}
|
||||
c2, err := sysx.LGetxattr(p2, "security.capability")
|
||||
if err != nil && err != sysx.ENODATA {
|
||||
return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
|
||||
}
|
||||
return bytes.Equal(c1, c2), nil
|
||||
}
|
||||
|
||||
func isLinked(f os.FileInfo) bool {
|
||||
s, ok := f.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return !f.IsDir() && s.Nlink > 1
|
||||
}
|
48 vendor/github.com/containerd/continuity/fs/diff_windows.go generated vendored Normal file
@@ -0,0 +1,48 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func detectDirDiff(upper, lower string) *diffDirOptions {
|
||||
return nil
|
||||
}
|
||||
|
||||
func compareSysStat(s1, s2 interface{}) (bool, error) {
|
||||
f1, ok := s1.(windows.Win32FileAttributeData)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
f2, ok := s2.(windows.Win32FileAttributeData)
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
return f1.FileAttributes == f2.FileAttributes, nil
|
||||
}
|
||||
|
||||
func compareCapabilities(p1, p2 string) (bool, error) {
|
||||
// TODO: Use windows equivalent
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func isLinked(os.FileInfo) bool {
|
||||
return false
|
||||
}
|
103 vendor/github.com/containerd/continuity/fs/dtype_linux.go generated vendored Normal file
@@ -0,0 +1,103 @@
// +build linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func locateDummyIfEmpty(path string) (string, error) {
|
||||
children, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(children) != 0 {
|
||||
return "", nil
|
||||
}
|
||||
dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
name := dummyFile.Name()
|
||||
err = dummyFile.Close()
|
||||
return name, err
|
||||
}
|
||||
|
||||
// SupportsDType returns whether the filesystem mounted on path supports d_type
|
||||
func SupportsDType(path string) (bool, error) {
|
||||
// locate dummy so that we have at least one dirent
|
||||
dummy, err := locateDummyIfEmpty(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if dummy != "" {
|
||||
defer os.Remove(dummy)
|
||||
}
|
||||
|
||||
visited := 0
|
||||
supportsDType := true
|
||||
fn := func(ent *syscall.Dirent) bool {
|
||||
visited++
|
||||
if ent.Type == syscall.DT_UNKNOWN {
|
||||
supportsDType = false
|
||||
// stop iteration
|
||||
return true
|
||||
}
|
||||
// continue iteration
|
||||
return false
|
||||
}
|
||||
if err = iterateReadDir(path, fn); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if visited == 0 {
|
||||
return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
|
||||
}
|
||||
return supportsDType, nil
|
||||
}
|
||||
|
||||
func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error {
|
||||
d, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer d.Close()
|
||||
fd := int(d.Fd())
|
||||
buf := make([]byte, 4096)
|
||||
for {
|
||||
nbytes, err := syscall.ReadDirent(fd, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nbytes == 0 {
|
||||
break
|
||||
}
|
||||
for off := 0; off < nbytes; {
|
||||
ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off]))
|
||||
if stop := fn(ent); stop {
|
||||
return nil
|
||||
}
|
||||
off += int(ent.Reclen)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
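SupportsDType is exported, so callers (storage-driver preflight checks, for example) can probe a mount point directly. A hedged sketch; the mount point below is only an example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// "/var/lib/docker" is an example mount point, not a requirement.
	ok, err := fs.SupportsDType("/var/lib/docker")
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		fmt.Println("backing filesystem does not report d_type; overlay-style drivers may misbehave")
	}
}
```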
38 vendor/github.com/containerd/continuity/fs/du.go generated vendored Normal file
@@ -0,0 +1,38 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import "context"
|
||||
|
||||
// Usage of disk information
|
||||
type Usage struct {
|
||||
Inodes int64
|
||||
Size int64
|
||||
}
|
||||
|
||||
// DiskUsage counts the number of inodes and disk usage for the resources under
|
||||
// path.
|
||||
func DiskUsage(ctx context.Context, roots ...string) (Usage, error) {
|
||||
return diskUsage(ctx, roots...)
|
||||
}
|
||||
|
||||
// DiffUsage counts the numbers of inodes and disk usage in the
|
||||
// diff between the 2 directories. The first path is intended
|
||||
// as the base directory and the second as the changed directory.
|
||||
func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
|
||||
return diffUsage(ctx, a, b)
|
||||
}
|
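DiskUsage and DiffUsage both return the Usage struct defined above. A short usage sketch, with illustrative paths:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	ctx := context.Background()

	// Total size and inode count under one or more roots (paths are hypothetical).
	u, err := fs.DiskUsage(ctx, "/tmp/layer1", "/tmp/layer2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("size=%d bytes inodes=%d\n", u.Size, u.Inodes)

	// Usage of only what changed between a base and an upper directory.
	d, err := fs.DiffUsage(ctx, "/tmp/base", "/tmp/upper")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("diff size=%d bytes\n", d.Size)
}
```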
110 vendor/github.com/containerd/continuity/fs/du_unix.go generated vendored Normal file
@@ -0,0 +1,110 @@
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type inode struct {
|
||||
// TODO(stevvooe): Can probably reduce memory usage by not tracking
|
||||
// device, but we can leave this right for now.
|
||||
dev, ino uint64
|
||||
}
|
||||
|
||||
func newInode(stat *syscall.Stat_t) inode {
|
||||
return inode{
|
||||
// Dev is uint32 on darwin/bsd, uint64 on linux/solaris
|
||||
dev: uint64(stat.Dev), // nolint: unconvert
|
||||
// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
|
||||
ino: uint64(stat.Ino), // nolint: unconvert
|
||||
}
|
||||
}
|
||||
|
||||
func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
|
||||
|
||||
var (
|
||||
size int64
|
||||
inodes = map[inode]struct{}{} // expensive!
|
||||
)
|
||||
|
||||
for _, root := range roots {
|
||||
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
inoKey := newInode(fi.Sys().(*syscall.Stat_t))
|
||||
if _, ok := inodes[inoKey]; !ok {
|
||||
inodes[inoKey] = struct{}{}
|
||||
size += fi.Size()
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Inodes: int64(len(inodes)),
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func diffUsage(ctx context.Context, a, b string) (Usage, error) {
|
||||
var (
|
||||
size int64
|
||||
inodes = map[inode]struct{}{} // expensive!
|
||||
)
|
||||
|
||||
if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if kind == ChangeKindAdd || kind == ChangeKindModify {
|
||||
inoKey := newInode(fi.Sys().(*syscall.Stat_t))
|
||||
if _, ok := inodes[inoKey]; !ok {
|
||||
inodes[inoKey] = struct{}{}
|
||||
size += fi.Size()
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Inodes: int64(len(inodes)),
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
82 vendor/github.com/containerd/continuity/fs/du_windows.go generated vendored Normal file
@@ -0,0 +1,82 @@
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
|
||||
var (
|
||||
size int64
|
||||
)
|
||||
|
||||
// TODO(stevvooe): Support inodes (or equivalent) for windows.
|
||||
|
||||
for _, root := range roots {
|
||||
if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
size += fi.Size()
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func diffUsage(ctx context.Context, a, b string) (Usage, error) {
|
||||
var (
|
||||
size int64
|
||||
)
|
||||
|
||||
if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if kind == ChangeKindAdd || kind == ChangeKindModify {
|
||||
size += fi.Size()
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return Usage{}, err
|
||||
}
|
||||
|
||||
return Usage{
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
43 vendor/github.com/containerd/continuity/fs/hardlink.go generated vendored Normal file
@@ -0,0 +1,43 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import "os"
|
||||
|
||||
// GetLinkInfo returns an identifier representing the node a hardlink is pointing
|
||||
// to. If the file is not hard linked then 0 will be returned.
|
||||
func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
|
||||
return getLinkInfo(fi)
|
||||
}
|
||||
|
||||
// getLinkSource returns a path for the given name and
|
||||
// file info to its link source in the provided inode
|
||||
// map. If the given file name is not in the map and
|
||||
// has other links, it is added to the inode map
|
||||
// to be a source for other link locations.
|
||||
func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
|
||||
inode, isHardlink := getLinkInfo(fi)
|
||||
if !isHardlink {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
path, ok := inodes[inode]
|
||||
if !ok {
|
||||
inodes[inode] = name
|
||||
}
|
||||
return path, nil
|
||||
}
|
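GetLinkInfo is the exported half of the hardlink tracking above. A tiny sketch of calling it on an lstat result; the path is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/containerd/continuity/fs"
)

func main() {
	fi, err := os.Lstat("/tmp/some-file") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	// The inode is only meaningful when linked is true (and is always 0, false on Windows).
	inode, linked := fs.GetLinkInfo(fi)
	if linked {
		fmt.Printf("hard linked, inode %d\n", inode)
	}
}
```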
34 vendor/github.com/containerd/continuity/fs/hardlink_unix.go generated vendored Normal file
@@ -0,0 +1,34 @@
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
|
||||
s, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
|
||||
return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert
|
||||
}
|
23 vendor/github.com/containerd/continuity/fs/hardlink_windows.go generated vendored Normal file
@@ -0,0 +1,23 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import "os"
|
||||
|
||||
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
|
||||
return 0, false
|
||||
}
|
313 vendor/github.com/containerd/continuity/fs/path.go generated vendored Normal file
@@ -0,0 +1,313 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
errTooManyLinks = errors.New("too many links")
|
||||
)
|
||||
|
||||
type currentPath struct {
|
||||
path string
|
||||
f os.FileInfo
|
||||
fullPath string
|
||||
}
|
||||
|
||||
func pathChange(lower, upper *currentPath) (ChangeKind, string) {
|
||||
if lower == nil {
|
||||
if upper == nil {
|
||||
panic("cannot compare nil paths")
|
||||
}
|
||||
return ChangeKindAdd, upper.path
|
||||
}
|
||||
if upper == nil {
|
||||
return ChangeKindDelete, lower.path
|
||||
}
|
||||
|
||||
switch i := directoryCompare(lower.path, upper.path); {
|
||||
case i < 0:
|
||||
// File in lower that is not in upper
|
||||
return ChangeKindDelete, lower.path
|
||||
case i > 0:
|
||||
// File in upper that is not in lower
|
||||
return ChangeKindAdd, upper.path
|
||||
default:
|
||||
return ChangeKindModify, upper.path
|
||||
}
|
||||
}
|
||||
|
||||
func directoryCompare(a, b string) int {
|
||||
l := len(a)
|
||||
if len(b) < l {
|
||||
l = len(b)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
c1, c2 := a[i], b[i]
|
||||
if c1 == filepath.Separator {
|
||||
c1 = byte(0)
|
||||
}
|
||||
if c2 == filepath.Separator {
|
||||
c2 = byte(0)
|
||||
}
|
||||
if c1 < c2 {
|
||||
return -1
|
||||
}
|
||||
if c1 > c2 {
|
||||
return +1
|
||||
}
|
||||
}
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
}
|
||||
if len(a) > len(b) {
|
||||
return +1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func sameFile(f1, f2 *currentPath) (bool, error) {
|
||||
if os.SameFile(f1.f, f2.f) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
|
||||
if err != nil || !equalStat {
|
||||
return equalStat, err
|
||||
}
|
||||
|
||||
if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
|
||||
return eq, err
|
||||
}
|
||||
|
||||
// If not a directory also check size, modtime, and content
|
||||
if !f1.f.IsDir() {
|
||||
if f1.f.Size() != f2.f.Size() {
|
||||
return false, nil
|
||||
}
|
||||
t1 := f1.f.ModTime()
|
||||
t2 := f2.f.ModTime()
|
||||
|
||||
if t1.Unix() != t2.Unix() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If the timestamp may have been truncated in both of the
|
||||
// files, check content of file to determine difference
|
||||
if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
|
||||
var eq bool
|
||||
if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
|
||||
eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath)
|
||||
} else if f1.f.Size() > 0 {
|
||||
eq, err = compareFileContent(f1.fullPath, f2.fullPath)
|
||||
}
|
||||
if err != nil || !eq {
|
||||
return eq, err
|
||||
}
|
||||
} else if t1.Nanosecond() != t2.Nanosecond() {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func compareSymlinkTarget(p1, p2 string) (bool, error) {
|
||||
t1, err := os.Readlink(p1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
t2, err := os.Readlink(p2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return t1 == t2, nil
|
||||
}
|
||||
|
||||
const compareChuckSize = 32 * 1024
|
||||
|
||||
// compareFileContent compares the content of 2 same sized files
|
||||
// by comparing each byte.
|
||||
func compareFileContent(p1, p2 string) (bool, error) {
|
||||
f1, err := os.Open(p1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f1.Close()
|
||||
f2, err := os.Open(p2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f2.Close()
|
||||
|
||||
b1 := make([]byte, compareChuckSize)
|
||||
b2 := make([]byte, compareChuckSize)
|
||||
for {
|
||||
n1, err1 := f1.Read(b1)
|
||||
if err1 != nil && err1 != io.EOF {
|
||||
return false, err1
|
||||
}
|
||||
n2, err2 := f2.Read(b2)
|
||||
if err2 != nil && err2 != io.EOF {
|
||||
return false, err2
|
||||
}
|
||||
if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) {
|
||||
return false, nil
|
||||
}
|
||||
if err1 == io.EOF && err2 == io.EOF {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error {
|
||||
return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rebase path
|
||||
path, err = filepath.Rel(root, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path = filepath.Join(string(os.PathSeparator), path)
|
||||
|
||||
// Skip root
|
||||
if path == string(os.PathSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
p := ¤tPath{
|
||||
path: path,
|
||||
f: f,
|
||||
fullPath: filepath.Join(root, path),
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case pathC <- p:
|
||||
return nil
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case p := <-pathC:
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
// RootPath joins a path with a root, evaluating and bounding any
|
||||
// symlink to the root directory.
|
||||
func RootPath(root, path string) (string, error) {
|
||||
if path == "" {
|
||||
return root, nil
|
||||
}
|
||||
var linksWalked int // to protect against cycles
|
||||
for {
|
||||
i := linksWalked
|
||||
newpath, err := walkLinks(root, path, &linksWalked)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
path = newpath
|
||||
if i == linksWalked {
|
||||
newpath = filepath.Join("/", newpath)
|
||||
if path == newpath {
|
||||
return filepath.Join(root, newpath), nil
|
||||
}
|
||||
path = newpath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) {
|
||||
if *linksWalked > 255 {
|
||||
return "", false, errTooManyLinks
|
||||
}
|
||||
|
||||
path = filepath.Join("/", path)
|
||||
if path == "/" {
|
||||
return path, false, nil
|
||||
}
|
||||
realPath := filepath.Join(root, path)
|
||||
|
||||
fi, err := os.Lstat(realPath)
|
||||
if err != nil {
|
||||
// If path does not yet exist, treat as non-symlink
|
||||
if os.IsNotExist(err) {
|
||||
return path, false, nil
|
||||
}
|
||||
return "", false, err
|
||||
}
|
||||
if fi.Mode()&os.ModeSymlink == 0 {
|
||||
return path, false, nil
|
||||
}
|
||||
newpath, err = os.Readlink(realPath)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
*linksWalked++
|
||||
return newpath, true, nil
|
||||
}
|
||||
|
||||
func walkLinks(root, path string, linksWalked *int) (string, error) {
|
||||
switch dir, file := filepath.Split(path); {
|
||||
case dir == "":
|
||||
newpath, _, err := walkLink(root, file, linksWalked)
|
||||
return newpath, err
|
||||
case file == "":
|
||||
if os.IsPathSeparator(dir[len(dir)-1]) {
|
||||
if dir == "/" {
|
||||
return dir, nil
|
||||
}
|
||||
return walkLinks(root, dir[:len(dir)-1], linksWalked)
|
||||
}
|
||||
newpath, _, err := walkLink(root, dir, linksWalked)
|
||||
return newpath, err
|
||||
default:
|
||||
newdir, err := walkLinks(root, dir, linksWalked)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !islink {
|
||||
return newpath, nil
|
||||
}
|
||||
if filepath.IsAbs(newpath) {
|
||||
return newpath, nil
|
||||
}
|
||||
return filepath.Join(newdir, newpath), nil
|
||||
}
|
||||
}
|
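RootPath resolves a path the way a chrooted process would see it, keeping every symlink inside the given root. A minimal sketch; the rootfs and relative path are illustrative assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Even if "etc/hostname" is a symlink to an absolute path, the result
	// stays under /var/lib/rootfs (paths are illustrative).
	p, err := fs.RootPath("/var/lib/rootfs", "etc/hostname")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p)
}
```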
44 vendor/github.com/containerd/continuity/fs/stat_bsd.go generated vendored Normal file
@@ -0,0 +1,44 @@
// +build darwin freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StatAtime returns the access time from a stat struct
|
||||
func StatAtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Atimespec
|
||||
}
|
||||
|
||||
// StatCtime returns the created time from a stat struct
|
||||
func StatCtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Ctimespec
|
||||
}
|
||||
|
||||
// StatMtime returns the modified time from a stat struct
|
||||
func StatMtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Mtimespec
|
||||
}
|
||||
|
||||
// StatATimeAsTime returns the access time as a time.Time
|
||||
func StatATimeAsTime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert
|
||||
}
|
43 vendor/github.com/containerd/continuity/fs/stat_linux.go generated vendored Normal file
@@ -0,0 +1,43 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StatAtime returns the Atim
|
||||
func StatAtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Atim
|
||||
}
|
||||
|
||||
// StatCtime returns the Ctim
|
||||
func StatCtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Ctim
|
||||
}
|
||||
|
||||
// StatMtime returns the Mtim
|
||||
func StatMtime(st *syscall.Stat_t) syscall.Timespec {
|
||||
return st.Mtim
|
||||
}
|
||||
|
||||
// StatATimeAsTime returns st.Atim as a time.Time
|
||||
func StatATimeAsTime(st *syscall.Stat_t) time.Time {
|
||||
// The int64 conversions ensure the line compiles for 32-bit systems as well.
|
||||
return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert
|
||||
}
|
vendor/github.com/containerd/continuity/fs/time.go (generated, vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package fs

import "time"

// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
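sameFsTime exists because tar headers only carry whole seconds; a timestamp that went through a tar round-trip loses its sub-second part but should still count as unchanged. A tiny self-contained illustration (the helper is copied verbatim so the snippet runs on its own):

package main

import (
	"fmt"
	"time"
)

// sameFsTime mirrors the vendored helper above: exact match, or equal whole
// seconds when either side has lost its nanoseconds (e.g. via a tar archive).
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	orig := time.Date(2019, 7, 25, 12, 0, 0, 123456789, time.UTC)
	viaTar := orig.Truncate(time.Second) // tar headers keep whole seconds only

	fmt.Println(orig.Equal(viaTar))       // false: naive comparison reports a change
	fmt.Println(sameFsTime(orig, viaTar)) // true: treated as the same filesystem time
}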
vendor/github.com/containerd/continuity/pathdriver/path_driver.go (generated, vendored, 16 changed lines)
@@ -1,3 +1,19 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pathdriver
|
||||
|
||||
import (
|
||||
|
|
vendor/github.com/containerd/continuity/syscallx/syscall_unix.go (generated, vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package syscallx
|
||||
|
||||
import "syscall"
|
||||
|
||||
// Readlink returns the destination of the named symbolic link.
|
||||
func Readlink(path string, buf []byte) (n int, err error) {
|
||||
return syscall.Readlink(path, buf)
|
||||
}
|
vendor/github.com/containerd/continuity/syscallx/syscall_windows.go (generated, vendored, new file, 112 lines)
@@ -0,0 +1,112 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package syscallx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type reparseDataBuffer struct {
|
||||
ReparseTag uint32
|
||||
ReparseDataLength uint16
|
||||
Reserved uint16
|
||||
|
||||
// GenericReparseBuffer
|
||||
reparseBuffer byte
|
||||
}
|
||||
|
||||
type mountPointReparseBuffer struct {
|
||||
SubstituteNameOffset uint16
|
||||
SubstituteNameLength uint16
|
||||
PrintNameOffset uint16
|
||||
PrintNameLength uint16
|
||||
PathBuffer [1]uint16
|
||||
}
|
||||
|
||||
type symbolicLinkReparseBuffer struct {
|
||||
SubstituteNameOffset uint16
|
||||
SubstituteNameLength uint16
|
||||
PrintNameOffset uint16
|
||||
PrintNameLength uint16
|
||||
Flags uint32
|
||||
PathBuffer [1]uint16
|
||||
}
|
||||
|
||||
const (
|
||||
_IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
|
||||
_SYMLINK_FLAG_RELATIVE = 1
|
||||
)
|
||||
|
||||
// Readlink returns the destination of the named symbolic link.
|
||||
func Readlink(path string, buf []byte) (n int, err error) {
|
||||
fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
defer syscall.CloseHandle(fd)
|
||||
|
||||
rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
|
||||
var bytesReturned uint32
|
||||
err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
|
||||
var s string
|
||||
switch rdb.ReparseTag {
|
||||
case syscall.IO_REPARSE_TAG_SYMLINK:
|
||||
data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
|
||||
p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
|
||||
s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
|
||||
if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 {
|
||||
if len(s) >= 4 && s[:4] == `\??\` {
|
||||
s = s[4:]
|
||||
switch {
|
||||
case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar
|
||||
// do nothing
|
||||
case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar
|
||||
s = `\\` + s[4:]
|
||||
default:
|
||||
// unexpected; do nothing
|
||||
}
|
||||
} else {
|
||||
// unexpected; do nothing
|
||||
}
|
||||
}
|
||||
case _IO_REPARSE_TAG_MOUNT_POINT:
|
||||
data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
|
||||
p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
|
||||
s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2])
|
||||
if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar
|
||||
if len(s) < 48 || s[:11] != `\??\Volume{` {
|
||||
s = s[4:]
|
||||
}
|
||||
} else {
|
||||
// unexpected; do nothing
|
||||
}
|
||||
default:
|
||||
// the path is not a symlink or junction but another type of reparse
|
||||
// point
|
||||
return -1, syscall.ENOENT
|
||||
}
|
||||
n = copy(buf, []byte(s))
|
||||
|
||||
return n, nil
|
||||
}
|
vendor/github.com/containerd/continuity/sysx/file_posix.go (generated, vendored, new file, 128 lines)
@@ -0,0 +1,128 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/continuity/syscallx"
|
||||
)
|
||||
|
||||
// Readlink returns the destination of the named symbolic link.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func Readlink(name string) (string, error) {
|
||||
for len := 128; ; len *= 2 {
|
||||
b := make([]byte, len)
|
||||
n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
|
||||
if e != nil {
|
||||
return "", &os.PathError{Op: "readlink", Path: name, Err: e}
|
||||
}
|
||||
if n < len {
|
||||
return string(b[0:n]), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Many functions in package syscall return a count of -1 instead of 0.
|
||||
// Using fixCount(call()) instead of call() corrects the count.
|
||||
func fixCount(n int, err error) (int, error) {
|
||||
if n < 0 {
|
||||
n = 0
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// fixLongPath returns the extended-length (\\?\-prefixed) form of
|
||||
// path when needed, in order to avoid the default 260 character file
|
||||
// path limit imposed by Windows. If path is not easily converted to
|
||||
// the extended-length form (for example, if path is a relative path
|
||||
// or contains .. elements), or is short enough, fixLongPath returns
|
||||
// path unmodified.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
|
||||
func fixLongPath(path string) string {
|
||||
// Do nothing (and don't allocate) if the path is "short".
|
||||
// Empirically (at least on the Windows Server 2013 builder),
|
||||
// the kernel is arbitrarily okay with < 248 bytes. That
|
||||
// matches what the docs above say:
|
||||
// "When using an API to create a directory, the specified
|
||||
// path cannot be so long that you cannot append an 8.3 file
|
||||
// name (that is, the directory name cannot exceed MAX_PATH
|
||||
// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
|
||||
//
|
||||
// The MSDN docs appear to say that a normal path that is 248 bytes long
|
||||
// will work; empirically the path must be less than 248 bytes long.
|
||||
if len(path) < 248 {
|
||||
// Don't fix. (This is how Go 1.7 and earlier worked,
|
||||
// not automatically generating the \\?\ form)
|
||||
return path
|
||||
}
|
||||
|
||||
// The extended form begins with \\?\, as in
|
||||
// \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
|
||||
// The extended form disables evaluation of . and .. path
|
||||
// elements and disables the interpretation of / as equivalent
|
||||
// to \. The conversion here rewrites / to \ and elides
|
||||
// . elements as well as trailing or duplicate separators. For
|
||||
// simplicity it avoids the conversion entirely for relative
|
||||
// paths or paths containing .. elements. For now,
|
||||
// \\server\share paths are not converted to
|
||||
// \\?\UNC\server\share paths because the rules for doing so
|
||||
// are less well-specified.
|
||||
if len(path) >= 2 && path[:2] == `\\` {
|
||||
// Don't canonicalize UNC paths.
|
||||
return path
|
||||
}
|
||||
if !filepath.IsAbs(path) {
|
||||
// Relative path
|
||||
return path
|
||||
}
|
||||
|
||||
const prefix = `\\?`
|
||||
|
||||
pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
|
||||
copy(pathbuf, prefix)
|
||||
n := len(path)
|
||||
r, w := 0, len(prefix)
|
||||
for r < n {
|
||||
switch {
|
||||
case os.IsPathSeparator(path[r]):
|
||||
// empty block
|
||||
r++
|
||||
case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
|
||||
// /./
|
||||
r++
|
||||
case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
|
||||
// /../ is currently unhandled
|
||||
return path
|
||||
default:
|
||||
pathbuf[w] = '\\'
|
||||
w++
|
||||
for ; r < n && !os.IsPathSeparator(path[r]); r++ {
|
||||
pathbuf[w] = path[r]
|
||||
w++
|
||||
}
|
||||
}
|
||||
}
|
||||
// A drive's root directory needs a trailing \
|
||||
if w == len(`\\?\c:`) {
|
||||
pathbuf[w] = '\\'
|
||||
w++
|
||||
}
|
||||
return string(pathbuf[:w])
|
||||
}
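To make the long-path rule concrete, below is a simplified, OS-independent stand-in for fixLongPath: it only shows when the \\?\ prefix would be added and skips the separator and ".." cleanup the vendored function performs. longPathHint and the sample paths are illustrative only.

package main

import (
	"fmt"
	"strings"
)

// longPathHint sketches the decision fixLongPath makes: only long, absolute,
// drive-letter paths gain the \\?\ prefix; short, relative, and UNC paths are
// returned unchanged. The vendored code additionally rewrites separators and
// bails out on ".." elements, which is omitted here.
func longPathHint(path string) string {
	isDrivePath := len(path) >= 2 && path[1] == ':' // e.g. C:\...
	if len(path) < 248 || strings.HasPrefix(path, `\\`) || !isDrivePath {
		return path
	}
	return `\\?\` + path
}

func main() {
	long := `C:\data\` + strings.Repeat(`reports\2019\`, 25) + `summary.txt`
	fmt.Println(len(long) >= 248)                    // true: over the classic MAX_PATH budget
	fmt.Println(longPathHint(long)[:4])              // \\?\
	fmt.Println(longPathHint(`reports\summary.txt`)) // relative path is left alone
}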
|
vendor/github.com/containerd/continuity/sysx/nodata_linux.go (generated, vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const ENODATA = syscall.ENODATA
|
vendor/github.com/containerd/continuity/sysx/nodata_solaris.go (generated, vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// This should actually be a set that contains ENOENT and EPERM
|
||||
const ENODATA = syscall.ENOENT
|
vendor/github.com/containerd/continuity/sysx/nodata_unix.go (generated, vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
// +build darwin freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const ENODATA = syscall.ENOATTR
|
vendor/github.com/containerd/continuity/sysx/xattr.go (generated, vendored, new file, 125 lines)
@@ -0,0 +1,125 @@
// +build linux darwin
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Listxattr calls syscall listxattr and reads all content
|
||||
// and returns a string array
|
||||
func Listxattr(path string) ([]string, error) {
|
||||
return listxattrAll(path, unix.Listxattr)
|
||||
}
|
||||
|
||||
// Removexattr calls syscall removexattr
|
||||
func Removexattr(path string, attr string) (err error) {
|
||||
return unix.Removexattr(path, attr)
|
||||
}
|
||||
|
||||
// Setxattr calls syscall setxattr
|
||||
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return unix.Setxattr(path, attr, data, flags)
|
||||
}
|
||||
|
||||
// Getxattr calls syscall getxattr
|
||||
func Getxattr(path, attr string) ([]byte, error) {
|
||||
return getxattrAll(path, attr, unix.Getxattr)
|
||||
}
|
||||
|
||||
// LListxattr lists xattrs, not following symlinks
|
||||
func LListxattr(path string) ([]string, error) {
|
||||
return listxattrAll(path, unix.Llistxattr)
|
||||
}
|
||||
|
||||
// LRemovexattr removes an xattr, not following symlinks
|
||||
func LRemovexattr(path string, attr string) (err error) {
|
||||
return unix.Lremovexattr(path, attr)
|
||||
}
|
||||
|
||||
// LSetxattr sets an xattr, not following symlinks
|
||||
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return unix.Lsetxattr(path, attr, data, flags)
|
||||
}
|
||||
|
||||
// LGetxattr gets an xattr, not following symlinks
|
||||
func LGetxattr(path, attr string) ([]byte, error) {
|
||||
return getxattrAll(path, attr, unix.Lgetxattr)
|
||||
}
|
||||
|
||||
const defaultXattrBufferSize = 5
|
||||
|
||||
type listxattrFunc func(path string, dest []byte) (int, error)
|
||||
|
||||
func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) {
|
||||
var p []byte // nil on first execution
|
||||
|
||||
for {
|
||||
n, err := listFunc(path, p) // first call gets buffer size.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n > len(p) {
|
||||
p = make([]byte, n)
|
||||
continue
|
||||
}
|
||||
|
||||
p = p[:n]
|
||||
|
||||
ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0})
|
||||
var entries []string
|
||||
for _, p := range ps {
|
||||
s := string(p)
|
||||
if s != "" {
|
||||
entries = append(entries, s)
|
||||
}
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
}
|
||||
|
||||
type getxattrFunc func(string, string, []byte) (int, error)
|
||||
|
||||
func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) {
|
||||
p := make([]byte, defaultXattrBufferSize)
|
||||
for {
|
||||
n, err := getFunc(path, attr, p)
|
||||
if err != nil {
|
||||
if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE {
|
||||
p = make([]byte, len(p)*2) // this can't be ideal.
|
||||
continue // try again!
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// realloc to correct size and repeat
|
||||
if n > len(p) {
|
||||
p = make([]byte, n)
|
||||
continue
|
||||
}
|
||||
|
||||
return p[:n], nil
|
||||
}
|
||||
}
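Both helpers above use the same grow-and-retry pattern: probe with a buffer, and when the kernel reports ERANGE or a larger required size, reallocate and call again. The self-contained sketch below replays that loop against a stub getter; fakeGetxattr stands in for unix.Getxattr and is not part of the vendored code.

package main

import (
	"fmt"
	"syscall"
)

// fakeGetxattr stands in for unix.Getxattr: it fails with ERANGE until the
// caller supplies a buffer large enough for the 12-byte attribute value.
func fakeGetxattr(path, attr string, dest []byte) (int, error) {
	value := []byte("hello,world!")
	if len(dest) < len(value) {
		return 0, syscall.ERANGE
	}
	return copy(dest, value), nil
}

// getxattrAll mirrors the vendored retry loop: start small, double on ERANGE,
// and resize exactly once more if the reported length exceeds the buffer.
func getxattrAll(path, attr string, getFunc func(string, string, []byte) (int, error)) ([]byte, error) {
	p := make([]byte, 5) // defaultXattrBufferSize in the vendored file
	for {
		n, err := getFunc(path, attr, p)
		if err != nil {
			if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE {
				p = make([]byte, len(p)*2)
				continue
			}
			return nil, err
		}
		if n > len(p) {
			p = make([]byte, n)
			continue
		}
		return p[:n], nil
	}
}

func main() {
	v, err := getxattrAll("/tmp/file", "user.example", fakeGetxattr)
	fmt.Println(string(v), err) // "hello,world!" <nil> after two ERANGE retries
}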
|
vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go (generated, vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
// +build !linux,!darwin
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sysx
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
var unsupported = errors.New("extended attributes unsupported on " + runtime.GOOS)
|
||||
|
||||
// Listxattr calls syscall listxattr and reads all content
|
||||
// and returns a string array
|
||||
func Listxattr(path string) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// Removexattr calls syscall removexattr
|
||||
func Removexattr(path string, attr string) (err error) {
|
||||
return unsupported
|
||||
}
|
||||
|
||||
// Setxattr calls syscall setxattr
|
||||
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return unsupported
|
||||
}
|
||||
|
||||
// Getxattr calls syscall getxattr
|
||||
func Getxattr(path, attr string) ([]byte, error) {
|
||||
return []byte{}, unsupported
|
||||
}
|
||||
|
||||
// LListxattr lists xattrs, not following symlinks
|
||||
func LListxattr(path string) ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// LRemovexattr removes an xattr, not following symlinks
|
||||
func LRemovexattr(path string, attr string) (err error) {
|
||||
return unsupported
|
||||
}
|
||||
|
||||
// LSetxattr sets an xattr, not following symlinks
|
||||
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
|
||||
return unsupported
|
||||
}
|
||||
|
||||
// LGetxattr gets an xattr, not following symlinks
|
||||
func LGetxattr(path, attr string) ([]byte, error) {
|
||||
return []byte{}, nil
|
||||
}
|
vendor/github.com/docker/cli/cli/command/image/build/context.go (generated, vendored, 110 changed lines)
@@ -41,13 +41,19 @@ func ValidateContextDirectory(srcPath string, excludes []string) error
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pm, err := fileutils.NewPatternMatcher(excludes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
return errors.Errorf("can't stat '%s'", filePath)
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
return errors.Errorf("file ('%s') not found or excluded by .dockerignore", filePath)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -55,7 +61,7 @@ func ValidateContextDirectory(srcPath string, excludes []string) error {
|
|||
// skip this directory/file if it's not in the path, it won't get added to the context
|
||||
if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil {
|
||||
return err
|
||||
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
|
||||
} else if skip, err := filepathMatches(pm, relFilePath); err != nil {
|
||||
return err
|
||||
} else if skip {
|
||||
if f.IsDir() {
|
||||
|
@ -81,59 +87,91 @@ func ValidateContextDirectory(srcPath string, excludes []string) error {
|
|||
})
|
||||
}
|
||||
|
||||
func filepathMatches(matcher *fileutils.PatternMatcher, file string) (bool, error) {
|
||||
file = filepath.Clean(file)
|
||||
if file == "." {
|
||||
// Don't let them exclude everything, kind of silly.
|
||||
return false, nil
|
||||
}
|
||||
return matcher.Matches(file)
|
||||
}
|
||||
|
||||
// DetectArchiveReader detects whether the input stream is an archive or a
|
||||
// Dockerfile and returns a buffered version of input, safe to consume in lieu
|
||||
// of input. If an archive is detected, isArchive is set to true, and to false
|
||||
// otherwise, in which case it is safe to assume input represents the contents
|
||||
// of a Dockerfile.
|
||||
func DetectArchiveReader(input io.ReadCloser) (rc io.ReadCloser, isArchive bool, err error) {
|
||||
buf := bufio.NewReader(input)
|
||||
|
||||
magic, err := buf.Peek(archiveHeaderSize * 2)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, false, errors.Errorf("failed to peek context header from STDIN: %v", err)
|
||||
}
|
||||
|
||||
return ioutils.NewReadCloserWrapper(buf, func() error { return input.Close() }), IsArchive(magic), nil
|
||||
}
|
||||
|
||||
// WriteTempDockerfile writes a Dockerfile stream to a temporary file with a
|
||||
// name specified by DefaultDockerfileName and returns the path to the
|
||||
// temporary directory containing the Dockerfile.
|
||||
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) {
|
||||
// err is a named return value, due to the defer call below.
|
||||
dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-")
|
||||
if err != nil {
|
||||
return "", errors.Errorf("unable to create temporary context directory: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.RemoveAll(dockerfileDir)
|
||||
}
|
||||
}()
|
||||
|
||||
f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err := io.Copy(f, rc); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return dockerfileDir, rc.Close()
|
||||
}
|
||||
|
||||
// GetContextFromReader will read the contents of the given reader as either a
|
||||
// Dockerfile or tar archive. Returns a tar archive used as a context and a
|
||||
// path to the Dockerfile inside the tar.
|
||||
func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
|
||||
buf := bufio.NewReader(r)
|
||||
|
||||
magic, err := buf.Peek(archiveHeaderSize)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, "", errors.Errorf("failed to peek context header from STDIN: %v", err)
|
||||
func GetContextFromReader(rc io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
|
||||
rc, isArchive, err := DetectArchiveReader(rc)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if IsArchive(magic) {
|
||||
return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
|
||||
if isArchive {
|
||||
return rc, dockerfileName, nil
|
||||
}
|
||||
|
||||
// Input should be read as a Dockerfile.
|
||||
|
||||
if dockerfileName == "-" {
|
||||
return nil, "", errors.New("build context is not an archive")
|
||||
}
|
||||
|
||||
// Input should be read as a Dockerfile.
|
||||
tmpDir, err := ioutil.TempDir("", "docker-build-context-")
|
||||
if err != nil {
|
||||
return nil, "", errors.Errorf("unable to create temporary context directory: %v", err)
|
||||
}
|
||||
|
||||
f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName))
|
||||
dockerfileDir, err := WriteTempDockerfile(rc)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
_, err = io.Copy(f, buf)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
if err := r.Close(); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
tar, err := archive.Tar(tmpDir, archive.Uncompressed)
|
||||
tar, err := archive.Tar(dockerfileDir, archive.Uncompressed)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return ioutils.NewReadCloserWrapper(tar, func() error {
|
||||
err := tar.Close()
|
||||
os.RemoveAll(tmpDir)
|
||||
os.RemoveAll(dockerfileDir)
|
||||
return err
|
||||
}), DefaultDockerfileName, nil
|
||||
|
||||
}
|
||||
|
||||
// IsArchive checks for the magic bytes of a tar or any supported compression
|
||||
|
@ -167,6 +205,10 @@ func GetContextFromGitURL(gitURL, dockerfileName string) (string, string, error)
|
|||
return "", "", err
|
||||
}
|
||||
relDockerfile, err := getDockerfileRelPath(absContextDir, dockerfileName)
|
||||
if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
|
||||
return "", "", errors.Errorf("the Dockerfile (%s) must be within the build context", dockerfileName)
|
||||
}
|
||||
|
||||
return absContextDir, relDockerfile, err
|
||||
}
|
||||
|
||||
|
@ -318,10 +360,6 @@ func getDockerfileRelPath(absContextDir, givenDockerfile string) (string, error)
|
|||
return "", errors.Errorf("unable to get relative Dockerfile path: %v", err)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
|
||||
return "", errors.Errorf("the Dockerfile (%s) must be within the build context", givenDockerfile)
|
||||
}
|
||||
|
||||
return relDockerfile, nil
|
||||
}
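The refactor splits what GetContextFromReader previously did inline into DetectArchiveReader and WriteTempDockerfile. A hedged usage sketch against the signatures shown in this diff, reading stdin as either a tar build context or a bare Dockerfile:

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command/image/build"
)

func main() {
	// Decide whether stdin is a tar archive or a plain Dockerfile without
	// consuming it: DetectArchiveReader returns a buffered, re-readable stream.
	rc, isArchive, err := build.DetectArchiveReader(os.Stdin)
	if err != nil {
		panic(err)
	}

	if isArchive {
		// The stream already is a build context and can be sent to the daemon as-is.
		fmt.Println("stdin is a tar build context")
		rc.Close()
		return
	}

	// Otherwise treat stdin as a Dockerfile and materialize a temporary context
	// directory around it, as GetContextFromReader now does internally.
	dir, err := build.WriteTempDockerfile(rc)
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fmt.Println("temporary build context written to", dir)
}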
|
||||
|
||||
|
|
vendor/github.com/docker/cli/cli/config/config.go (generated, vendored, 30 changed lines)
@@ -5,10 +5,11 @@ import (
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
@ -18,6 +19,7 @@ const (
|
|||
ConfigFileName = "config.json"
|
||||
configFileDir = ".docker"
|
||||
oldConfigfile = ".dockercfg"
|
||||
contextsDir = "contexts"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -35,9 +37,23 @@ func Dir() string {
|
|||
return configDir
|
||||
}
|
||||
|
||||
// ContextStoreDir returns the directory the docker contexts are stored in
|
||||
func ContextStoreDir() string {
|
||||
return filepath.Join(Dir(), contextsDir)
|
||||
}
|
||||
|
||||
// SetDir sets the directory the configuration file is stored in
|
||||
func SetDir(dir string) {
|
||||
configDir = dir
|
||||
configDir = filepath.Clean(dir)
|
||||
}
|
||||
|
||||
// Path returns the path to a file relative to the config dir
|
||||
func Path(p ...string) (string, error) {
|
||||
path := filepath.Join(append([]string{Dir()}, p...)...)
|
||||
if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) {
|
||||
return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir())
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
||||
|
@ -75,18 +91,18 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
|
|||
if _, err := os.Stat(filename); err == nil {
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return configFile, errors.Errorf("%s - %v", filename, err)
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
defer file.Close()
|
||||
err = configFile.LoadFromReader(file)
|
||||
if err != nil {
|
||||
err = errors.Errorf("%s - %v", filename, err)
|
||||
err = errors.Wrap(err, filename)
|
||||
}
|
||||
return configFile, err
|
||||
} else if !os.IsNotExist(err) {
|
||||
// if file is there but we can't stat it for any reason other
|
||||
// than it doesn't exist then stop
|
||||
return configFile, errors.Errorf("%s - %v", filename, err)
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
|
||||
// Can't find latest config file so check for the old one
|
||||
|
@ -96,12 +112,12 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
|
|||
}
|
||||
file, err := os.Open(confFile)
|
||||
if err != nil {
|
||||
return configFile, errors.Errorf("%s - %v", confFile, err)
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
defer file.Close()
|
||||
err = configFile.LegacyLoadFromReader(file)
|
||||
if err != nil {
|
||||
return configFile, errors.Errorf("%s - %v", confFile, err)
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
}
|
||||
return configFile, nil
|
||||
}
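A short usage sketch of this loading path together with the configfile accessors changed further down in this diff; the registry hostname is only an example value.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/config"
)

func main() {
	// Load ~/.docker/config.json (or the legacy .dockercfg fallback shown above).
	cfg, err := config.Load(config.Dir())
	if err != nil {
		panic(err)
	}

	// Resolve credentials for one registry through the configured credential
	// store or helper; "registry-1.docker.io" is just an illustrative hostname.
	auth, err := cfg.GetAuthConfig("registry-1.docker.io")
	if err != nil {
		panic(err)
	}
	fmt.Println("username:", auth.Username)
}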
|
||||
|
|
vendor/github.com/docker/cli/cli/config/configfile/file.go (generated, vendored, 140 changed lines)
@@ -3,6 +3,7 @@ package configfile
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
@ -10,8 +11,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -19,7 +19,7 @@ const (
|
|||
// This constant is only used for really old config files when the
|
||||
// URL wasn't saved as part of the config file and it was just
|
||||
// assumed to be this value.
|
||||
defaultIndexserver = "https://index.docker.io/v1/"
|
||||
defaultIndexServer = "https://index.docker.io/v1/"
|
||||
)
|
||||
|
||||
// ConfigFile ~/.docker/config.json file info
|
||||
|
@ -44,6 +44,13 @@ type ConfigFile struct {
|
|||
NodesFormat string `json:"nodesFormat,omitempty"`
|
||||
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||
Experimental string `json:"experimental,omitempty"`
|
||||
StackOrchestrator string `json:"stackOrchestrator,omitempty"`
|
||||
Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
|
||||
CurrentContext string `json:"currentContext,omitempty"`
|
||||
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
|
||||
Plugins map[string]map[string]string `json:"plugins,omitempty"`
|
||||
Aliases map[string]string `json:"aliases,omitempty"`
|
||||
}
|
||||
|
||||
// ProxyConfig contains proxy configuration settings
|
||||
|
@ -54,12 +61,19 @@ type ProxyConfig struct {
|
|||
FTPProxy string `json:"ftpProxy,omitempty"`
|
||||
}
|
||||
|
||||
// KubernetesConfig contains Kubernetes orchestrator settings
|
||||
type KubernetesConfig struct {
|
||||
AllNamespaces string `json:"allNamespaces,omitempty"`
|
||||
}
|
||||
|
||||
// New initializes an empty configuration file for the given filename 'fn'
|
||||
func New(fn string) *ConfigFile {
|
||||
return &ConfigFile{
|
||||
AuthConfigs: make(map[string]types.AuthConfig),
|
||||
HTTPHeaders: make(map[string]string),
|
||||
Filename: fn,
|
||||
Plugins: make(map[string]map[string]string),
|
||||
Aliases: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -85,8 +99,8 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
authConfig.ServerAddress = defaultIndexserver
|
||||
configFile.AuthConfigs[defaultIndexserver] = authConfig
|
||||
authConfig.ServerAddress = defaultIndexServer
|
||||
configFile.AuthConfigs[defaultIndexServer] = authConfig
|
||||
} else {
|
||||
for k, authConfig := range configFile.AuthConfigs {
|
||||
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
|
||||
|
@ -117,7 +131,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
|
|||
ac.ServerAddress = addr
|
||||
configFile.AuthConfigs[addr] = ac
|
||||
}
|
||||
return nil
|
||||
return checkKubernetesConfiguration(configFile.Kubernetes)
|
||||
}
|
||||
|
||||
// ContainsAuth returns whether there is authentication configured
|
||||
|
@ -166,20 +180,26 @@ func (configFile *ConfigFile) Save() error {
|
|||
return errors.Errorf("Can't save config with empty filename")
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil {
|
||||
dir := filepath.Dir(configFile.Filename)
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
return configFile.SaveToWriter(f)
|
||||
err = configFile.SaveToWriter(temp)
|
||||
temp.Close()
|
||||
if err != nil {
|
||||
os.Remove(temp.Name())
|
||||
return err
|
||||
}
|
||||
return os.Rename(temp.Name(), configFile.Filename)
|
||||
}
|
||||
|
||||
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
|
||||
// then checking this against any environment variables provided to the container
|
||||
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) map[string]*string {
|
||||
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
|
||||
var cfgKey string
|
||||
|
||||
if _, ok := configFile.Proxies[host]; !ok {
|
||||
|
@ -195,7 +215,10 @@ func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) ma
|
|||
"NO_PROXY": &config.NoProxy,
|
||||
"FTP_PROXY": &config.FTPProxy,
|
||||
}
|
||||
m := opts.ConvertKVStringsToMapWithNil(runOpts)
|
||||
m := runOpts
|
||||
if m == nil {
|
||||
m = make(map[string]*string)
|
||||
}
|
||||
for k := range permitted {
|
||||
if *permitted[k] == "" {
|
||||
continue
|
||||
|
@ -249,24 +272,29 @@ func decodeAuth(authStr string) (string, string, error) {
|
|||
|
||||
// GetCredentialsStore returns a new credentials store from the settings in the
|
||||
// configuration file
|
||||
func (configFile *ConfigFile) GetCredentialsStore(serverAddress string) credentials.Store {
|
||||
if helper := getConfiguredCredentialStore(configFile, serverAddress); helper != "" {
|
||||
return credentials.NewNativeStore(configFile, helper)
|
||||
func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store {
|
||||
if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" {
|
||||
return newNativeStore(configFile, helper)
|
||||
}
|
||||
return credentials.NewFileStore(configFile)
|
||||
}
|
||||
|
||||
// var for unit testing.
|
||||
var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store {
|
||||
return credentials.NewNativeStore(configFile, helperSuffix)
|
||||
}
|
||||
|
||||
// GetAuthConfig for a repository from the credential store
|
||||
func (configFile *ConfigFile) GetAuthConfig(serverAddress string) (types.AuthConfig, error) {
|
||||
return configFile.GetCredentialsStore(serverAddress).Get(serverAddress)
|
||||
func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) {
|
||||
return configFile.GetCredentialsStore(registryHostname).Get(registryHostname)
|
||||
}
|
||||
|
||||
// getConfiguredCredentialStore returns the credential helper configured for the
|
||||
// given registry, the default credsStore, or the empty string if neither are
|
||||
// configured.
|
||||
func getConfiguredCredentialStore(c *ConfigFile, serverAddress string) string {
|
||||
if c.CredentialHelpers != nil && serverAddress != "" {
|
||||
if helper, exists := c.CredentialHelpers[serverAddress]; exists {
|
||||
func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string {
|
||||
if c.CredentialHelpers != nil && registryHostname != "" {
|
||||
if helper, exists := c.CredentialHelpers[registryHostname]; exists {
|
||||
return helper
|
||||
}
|
||||
}
|
||||
|
@ -283,19 +311,75 @@ func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig,
|
|||
}
|
||||
}
|
||||
|
||||
for registry := range configFile.CredentialHelpers {
|
||||
helper := configFile.GetCredentialsStore(registry)
|
||||
newAuths, err := helper.GetAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addAll(newAuths)
|
||||
}
|
||||
defaultStore := configFile.GetCredentialsStore("")
|
||||
newAuths, err := defaultStore.GetAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addAll(newAuths)
|
||||
|
||||
// Auth configs from a registry-specific helper should override those from the default store.
|
||||
for registryHostname := range configFile.CredentialHelpers {
|
||||
newAuth, err := configFile.GetAuthConfig(registryHostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auths[registryHostname] = newAuth
|
||||
}
|
||||
return auths, nil
|
||||
}
|
||||
|
||||
// GetFilename returns the file name that this config file is based on.
|
||||
func (configFile *ConfigFile) GetFilename() string {
|
||||
return configFile.Filename
|
||||
}
|
||||
|
||||
// PluginConfig retrieves the requested option for the given plugin.
|
||||
func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) {
|
||||
if configFile.Plugins == nil {
|
||||
return "", false
|
||||
}
|
||||
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
value, ok := pluginConfig[option]
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// SetPluginConfig sets the option to the given value for the given
|
||||
// plugin. Passing a value of "" will remove the option. If removing
|
||||
// the final config item for a given plugin then also cleans up the
|
||||
// overall plugin entry.
|
||||
func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) {
|
||||
if configFile.Plugins == nil {
|
||||
configFile.Plugins = make(map[string]map[string]string)
|
||||
}
|
||||
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||
if !ok {
|
||||
pluginConfig = make(map[string]string)
|
||||
configFile.Plugins[pluginname] = pluginConfig
|
||||
}
|
||||
if value != "" {
|
||||
pluginConfig[option] = value
|
||||
} else {
|
||||
delete(pluginConfig, option)
|
||||
}
|
||||
if len(pluginConfig) == 0 {
|
||||
delete(configFile.Plugins, pluginname)
|
||||
}
|
||||
}
|
||||
|
||||
func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
|
||||
if kubeConfig == nil {
|
||||
return nil
|
||||
}
|
||||
switch kubeConfig.AllNamespaces {
|
||||
case "":
|
||||
case "enabled":
|
||||
case "disabled":
|
||||
default:
|
||||
return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces)
|
||||
}
|
||||
return nil
|
||||
}
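The rewritten Save switches from truncating the config file in place to writing a sibling temp file and renaming it over the original, so a crash mid-write can no longer leave a corrupted config.json. A generic, standalone sketch of that atomic-write pattern (writeFileAtomic is an illustrative name, not the vendored API):

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temp file next to path and renames it into
// place, so readers never observe a half-written file, mirroring ConfigFile.Save.
func writeFileAtomic(path string, data []byte, mode os.FileMode) error {
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}
	tmp, err := ioutil.TempFile(dir, filepath.Base(path))
	if err != nil {
		return err
	}
	_, werr := tmp.Write(data)
	cerr := tmp.Close()
	if werr != nil || cerr != nil {
		os.Remove(tmp.Name())
		if werr != nil {
			return werr
		}
		return cerr
	}
	if err := os.Chmod(tmp.Name(), mode); err != nil {
		os.Remove(tmp.Name())
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	if err := writeFileAtomic("/tmp/example-config.json", []byte(`{"auths":{}}`), 0600); err != nil {
		panic(err)
	}
}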
|
||||
|
|
vendor/github.com/docker/cli/cli/config/credentials/credentials.go (generated, vendored, 2 changed lines)
@@ -1,7 +1,7 @@
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
)
|
||||
|
||||
// Store is the interface that any credentials store must implement.
|
||||
|
|
vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go (generated, vendored, 4 changed lines)
@@ -1,11 +1,11 @@
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/docker-credential-helpers/pass"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func defaultCredentialsStore() string {
|
||||
if pass.PassInitialized {
|
||||
if _, err := exec.LookPath("pass"); err == nil {
|
||||
return "pass"
|
||||
}
|
||||
|
||||
|
|
vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go (generated, vendored, 4 changed lines)
@@ -2,4 +2,6 @@
|
||||
package credentials
|
||||
|
||||
const defaultCredentialsStore = ""
|
||||
func defaultCredentialsStore() string {
|
||||
return ""
|
||||
}
|
||||
|
|
vendor/github.com/docker/cli/cli/config/credentials/file_store.go (generated, vendored, 32 changed lines)
@@ -1,13 +1,15 @@
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/registry"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
)
|
||||
|
||||
type store interface {
|
||||
Save() error
|
||||
GetAuthConfigs() map[string]types.AuthConfig
|
||||
GetFilename() string
|
||||
}
|
||||
|
||||
// fileStore implements a credentials store using
|
||||
|
@ -34,7 +36,7 @@ func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
|
|||
// Maybe they have a legacy config file, we will iterate the keys converting
|
||||
// them to the new format and testing
|
||||
for r, ac := range c.file.GetAuthConfigs() {
|
||||
if serverAddress == registry.ConvertToHostname(r) {
|
||||
if serverAddress == ConvertToHostname(r) {
|
||||
return ac, nil
|
||||
}
|
||||
}
|
||||
|
@ -53,3 +55,27 @@ func (c *fileStore) Store(authConfig types.AuthConfig) error {
|
|||
c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig
|
||||
return c.file.Save()
|
||||
}
|
||||
|
||||
func (c *fileStore) GetFilename() string {
|
||||
return c.file.GetFilename()
|
||||
}
|
||||
|
||||
func (c *fileStore) IsFileStore() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ConvertToHostname converts a registry url which has http|https prepended
|
||||
// to just a hostname.
|
||||
// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
|
||||
func ConvertToHostname(url string) string {
|
||||
stripped := url
|
||||
if strings.HasPrefix(url, "http://") {
|
||||
stripped = strings.TrimPrefix(url, "http://")
|
||||
} else if strings.HasPrefix(url, "https://") {
|
||||
stripped = strings.TrimPrefix(url, "https://")
|
||||
}
|
||||
|
||||
nameParts := strings.SplitN(stripped, "/", 2)
|
||||
|
||||
return nameParts[0]
|
||||
}
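ConvertToHostname is duplicated here so the credentials package no longer depends on docker/docker/registry; a quick check of what it returns (example URLs only):

package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/credentials"
)

func main() {
	for _, u := range []string{
		"https://registry-1.docker.io/v2/",
		"http://localhost:5000/my/repo",
		"registry.example.com/team/app",
	} {
		// Scheme and path are stripped; only the host (and port) remains.
		fmt.Println(credentials.ConvertToHostname(u))
	}
	// Prints: registry-1.docker.io, localhost:5000, registry.example.com
}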
|
||||
|
|
vendor/github.com/docker/cli/cli/config/credentials/native_store.go (generated, vendored, 2 changed lines)
@@ -1,9 +1,9 @@
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"github.com/docker/docker-credential-helpers/client"
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
vendor/github.com/docker/cli/cli/config/types/authconfig.go (generated, vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
package types

// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth,omitempty"`

	// Email is an optional value associated with the username.
	// This field is deprecated and will be removed in a later
	// version of docker.
	Email string `json:"email,omitempty"`

	ServerAddress string `json:"serveraddress,omitempty"`

	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `json:"identitytoken,omitempty"`

	// RegistryToken is a bearer token to be sent to a registry
	RegistryToken string `json:"registrytoken,omitempty"`
}
vendor/github.com/docker/cli/opts/envfile.go (generated, vendored, 61 changed lines)
@@ -1,13 +1,7 @@
package opts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ParseEnvFile reads a file with environment variables enumerated by lines
|
||||
|
@ -24,58 +18,5 @@ import (
|
|||
// environment variables, that's why we just strip leading whitespace and
|
||||
// nothing more.
|
||||
func ParseEnvFile(filename string) ([]string, error) {
|
||||
fh, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
lines := []string{}
|
||||
scanner := bufio.NewScanner(fh)
|
||||
currentLine := 0
|
||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||
for scanner.Scan() {
|
||||
scannedBytes := scanner.Bytes()
|
||||
if !utf8.Valid(scannedBytes) {
|
||||
return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
|
||||
}
|
||||
// We trim UTF8 BOM
|
||||
if currentLine == 0 {
|
||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||
}
|
||||
// trim the line from all leading whitespace first
|
||||
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
||||
currentLine++
|
||||
// line is not empty, and not starting with '#'
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||
data := strings.SplitN(line, "=", 2)
|
||||
|
||||
// trim the front of a variable, but nothing else
|
||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||
if strings.ContainsAny(variable, whiteSpaces) {
|
||||
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
|
||||
}
|
||||
|
||||
if len(data) > 1 {
|
||||
|
||||
// pass the value through, no trimming
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
|
||||
} else {
|
||||
// if only a pass-through variable is given, clean it up.
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
|
||||
}
|
||||
}
|
||||
}
|
||||
return lines, scanner.Err()
|
||||
}
|
||||
|
||||
var whiteSpaces = " \t"
|
||||
|
||||
// ErrBadEnvVariable typed error for bad environment variable
|
||||
type ErrBadEnvVariable struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e ErrBadEnvVariable) Error() string {
|
||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||
return parseKeyValueFile(filename, os.LookupEnv)
|
||||
}
|
||||
|
|
vendor/github.com/docker/cli/opts/file.go (generated, vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
package opts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const whiteSpaces = " \t"
|
||||
|
||||
// ErrBadKey typed error for bad environment variable
|
||||
type ErrBadKey struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e ErrBadKey) Error() string {
|
||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||
}
|
||||
|
||||
func parseKeyValueFile(filename string, emptyFn func(string) (string, bool)) ([]string, error) {
|
||||
fh, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
lines := []string{}
|
||||
scanner := bufio.NewScanner(fh)
|
||||
currentLine := 0
|
||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||
for scanner.Scan() {
|
||||
scannedBytes := scanner.Bytes()
|
||||
if !utf8.Valid(scannedBytes) {
|
||||
return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
|
||||
}
|
||||
// We trim UTF8 BOM
|
||||
if currentLine == 0 {
|
||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||
}
|
||||
// trim the line from all leading whitespace first
|
||||
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
||||
currentLine++
|
||||
// line is not empty, and not starting with '#'
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||
data := strings.SplitN(line, "=", 2)
|
||||
|
||||
// trim the front of a variable, but nothing else
|
||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||
if strings.ContainsAny(variable, whiteSpaces) {
|
||||
return []string{}, ErrBadKey{fmt.Sprintf("variable '%s' contains whitespaces", variable)}
|
||||
}
|
||||
if len(variable) == 0 {
|
||||
return []string{}, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)}
|
||||
}
|
||||
|
||||
if len(data) > 1 {
|
||||
// pass the value through, no trimming
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
|
||||
} else {
|
||||
var value string
|
||||
var present bool
|
||||
if emptyFn != nil {
|
||||
value, present = emptyFn(line)
|
||||
}
|
||||
if present {
|
||||
// if only a pass-through variable is given, clean it up.
|
||||
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), value))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return lines, scanner.Err()
|
||||
}
|
vendor/github.com/docker/cli/opts/gpus.go (generated, vendored, new file, 112 lines)
@@ -0,0 +1,112 @@
package opts
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// GpuOpts is a Value type for parsing mounts
|
||||
type GpuOpts struct {
|
||||
values []container.DeviceRequest
|
||||
}
|
||||
|
||||
func parseCount(s string) (int, error) {
|
||||
if s == "all" {
|
||||
return -1, nil
|
||||
}
|
||||
i, err := strconv.Atoi(s)
|
||||
return i, errors.Wrap(err, "count must be an integer")
|
||||
}
|
||||
|
||||
// Set a new mount value
|
||||
// nolint: gocyclo
|
||||
func (o *GpuOpts) Set(value string) error {
|
||||
csvReader := csv.NewReader(strings.NewReader(value))
|
||||
fields, err := csvReader.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req := container.DeviceRequest{}
|
||||
|
||||
seen := map[string]struct{}{}
|
||||
// Set writable as the default
|
||||
for _, field := range fields {
|
||||
parts := strings.SplitN(field, "=", 2)
|
||||
key := parts[0]
|
||||
if _, ok := seen[key]; ok {
|
||||
return fmt.Errorf("gpu request key '%s' can be specified only once", key)
|
||||
}
|
||||
seen[key] = struct{}{}
|
||||
|
||||
if len(parts) == 1 {
|
||||
seen["count"] = struct{}{}
|
||||
req.Count, err = parseCount(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
value := parts[1]
|
||||
switch key {
|
||||
case "driver":
|
||||
req.Driver = value
|
||||
case "count":
|
||||
req.Count, err = parseCount(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case "device":
|
||||
req.DeviceIDs = strings.Split(value, ",")
|
||||
case "capabilities":
|
||||
req.Capabilities = [][]string{append(strings.Split(value, ","), "gpu")}
|
||||
case "options":
|
||||
r := csv.NewReader(strings.NewReader(value))
|
||||
optFields, err := r.Read()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read gpu options")
|
||||
}
|
||||
req.Options = ConvertKVStringsToMap(optFields)
|
||||
default:
|
||||
return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := seen["count"]; !ok && req.DeviceIDs == nil {
|
||||
req.Count = 1
|
||||
}
|
||||
if req.Options == nil {
|
||||
req.Options = make(map[string]string)
|
||||
}
|
||||
if req.Capabilities == nil {
|
||||
req.Capabilities = [][]string{{"gpu"}}
|
||||
}
|
||||
|
||||
o.values = append(o.values, req)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of this option
|
||||
func (o *GpuOpts) Type() string {
|
||||
return "gpu-request"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (o *GpuOpts) String() string {
|
||||
gpus := []string{}
|
||||
for _, gpu := range o.values {
|
||||
gpus = append(gpus, fmt.Sprintf("%v", gpu))
|
||||
}
|
||||
return strings.Join(gpus, ", ")
|
||||
}
|
||||
|
||||
// Value returns the mounts
|
||||
func (o *GpuOpts) Value() []container.DeviceRequest {
|
||||
return o.values
|
||||
}
|
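Reviewer note (not part of the vendored change): a minimal sketch of how the new GpuOpts type is driven; the flag values below are made-up examples.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var g opts.GpuOpts
	_ = g.Set("all")                   // Count=-1 ("all"); Capabilities default to [["gpu"]]
	_ = g.Set("driver=nvidia,count=2") // a second device request
	for _, req := range g.Value() {
		fmt.Printf("%+v\n", req)
	}
}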
2 vendor/github.com/docker/cli/opts/hosts.go generated vendored
@@ -77,6 +77,8 @@ func parseDockerDaemonHost(addr string) (string, error) {
		return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
	case "fd":
		return addr, nil
	case "ssh":
		return addr, nil
	default:
		return "", fmt.Errorf("Invalid bind address format: %s", addr)
	}
8 vendor/github.com/docker/cli/opts/mount.go generated vendored
@@ -76,6 +76,9 @@ func (m *MountOpt) Set(value string) error {
			case "volume-nocopy":
				volumeOptions().NoCopy = true
				continue
			case "bind-nonrecursive":
				bindOptions().NonRecursive = true
				continue
			}
		}
@@ -100,6 +103,11 @@ func (m *MountOpt) Set(value string) error {
			mount.Consistency = mounttypes.Consistency(strings.ToLower(value))
		case "bind-propagation":
			bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
		case "bind-nonrecursive":
			bindOptions().NonRecursive, err = strconv.ParseBool(value)
			if err != nil {
				return fmt.Errorf("invalid value for %s: %s", key, value)
			}
		case "volume-nocopy":
			volumeOptions().NoCopy, err = strconv.ParseBool(value)
			if err != nil {
14 vendor/github.com/docker/cli/opts/network.go generated vendored
@@ -18,6 +18,10 @@ type NetworkAttachmentOpts struct {
	Target       string
	Aliases      []string
	DriverOpts   map[string]string
	Links        []string // TODO add support for links in the csv notation of `--network`
	IPv4Address  string   // TODO add support for IPv4-address in the csv notation of `--network`
	IPv6Address  string   // TODO add support for IPv6-address in the csv notation of `--network`
	LinkLocalIPs []string // TODO add support for LinkLocalIPs in the csv notation of `--network` ?
}

// NetworkOpt represents a network config in swarm mode.
@@ -95,6 +99,16 @@ func (n *NetworkOpt) String() string {
	return ""
}

// NetworkMode return the network mode for the network option
func (n *NetworkOpt) NetworkMode() string {
	networkIDOrName := "default"
	netOptVal := n.Value()
	if len(netOptVal) > 0 {
		networkIDOrName = netOptVal[0].Target
	}
	return networkIDOrName
}

func parseDriverOpt(driverOpt string) (string, string, error) {
	parts := strings.SplitN(driverOpt, "=", 2)
	if len(parts) != 2 {
49 vendor/github.com/docker/cli/opts/opts.go generated vendored
@@ -10,6 +10,7 @@ import (

	"github.com/docker/docker/api/types/filters"
	units "github.com/docker/go-units"
	"github.com/pkg/errors"
)

var (
@@ -266,10 +267,24 @@ func validateDomain(val string) (string, error) {
}

// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form on key=value.
//
// Labels are in the form of key=value; key must be a non-empty string, and not
// contain whitespaces. A value is optional (defaults to an empty string if omitted).
//
// Leading whitespace is removed during validation but values are kept as-is
// otherwise, so any string value is accepted for both, which includes whitespace
// (for values) and quotes (surrounding, or embedded in key or value).
//
// TODO discuss if quotes (and other special characters) should be valid or invalid for keys
// TODO discuss if leading/trailing whitespace in keys should be preserved (and valid)
func ValidateLabel(val string) (string, error) {
	if strings.Count(val, "=") < 1 {
		return "", fmt.Errorf("bad attribute format: %s", val)
	arr := strings.SplitN(val, "=", 2)
	key := strings.TrimLeft(arr[0], whiteSpaces)
	if key == "" {
		return "", fmt.Errorf("invalid label '%s': empty name", val)
	}
	if strings.ContainsAny(key, whiteSpaces) {
		return "", fmt.Errorf("label '%s' contains whitespaces", key)
	}
	return val, nil
}
@@ -306,6 +321,17 @@ func ValidateSysctl(val string) (string, error) {
	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}

// ValidateProgressOutput errors out if an invalid value is passed to --progress
func ValidateProgressOutput(val string) error {
	valid := []string{"auto", "plain", "tty"}
	for _, s := range valid {
		if s == val {
			return nil
		}
	}
	return fmt.Errorf("invalid value %q passed to --progress, valid values are: %s", val, strings.Join(valid, ", "))
}

// FilterOpt is a flag type for validating filters
type FilterOpt struct {
	filter filters.Args
@@ -317,7 +343,7 @@ func NewFilterOpt() FilterOpt {
}

func (o *FilterOpt) String() string {
	repr, err := filters.ToParam(o.filter)
	repr, err := filters.ToJSON(o.filter)
	if err != nil {
		return "invalid filters"
	}
@@ -326,9 +352,18 @@ func (o *FilterOpt) String() string {

// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
	var err error
	o.filter, err = filters.ParseFlag(value, o.filter)
	return err
	if value == "" {
		return nil
	}
	if !strings.Contains(value, "=") {
		return errors.New("bad format of filter (expected name=value)")
	}
	f := strings.SplitN(value, "=", 2)
	name := strings.ToLower(strings.TrimSpace(f[0]))
	value = strings.TrimSpace(f[1])

	o.filter.Add(name, value)
	return nil
}

// Type returns the option type
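Reviewer note (not part of the vendored change): a small sketch of the reworked validators and FilterOpt; names and values below are illustrative only.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// --progress accepts only auto|plain|tty after this change.
	fmt.Println(opts.ValidateProgressOutput("plain")) // <nil>

	// FilterOpt.Set now parses name=value itself instead of filters.ParseFlag.
	f := opts.NewFilterOpt()
	_ = f.Set("label=com.example.env=prod")
	fmt.Println(f.String()) // JSON via filters.ToJSON
}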
22 vendor/github.com/docker/cli/opts/parse.go generated vendored
@@ -2,6 +2,7 @@ package opts

import (
	"fmt"
	"os"
	"strconv"
	"strings"

@@ -11,18 +12,29 @@ import (
// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys
// present in the file with additional pairs specified in the override parameter
func ReadKVStrings(files []string, override []string) ([]string, error) {
	envVariables := []string{}
	return readKVStrings(files, override, nil)
}

// ReadKVEnvStrings reads a file of line terminated key=value pairs, and overrides any keys
// present in the file with additional pairs specified in the override parameter.
// If a key has no value, it will get the value from the environment.
func ReadKVEnvStrings(files []string, override []string) ([]string, error) {
	return readKVStrings(files, override, os.LookupEnv)
}

func readKVStrings(files []string, override []string, emptyFn func(string) (string, bool)) ([]string, error) {
	variables := []string{}
	for _, ef := range files {
		parsedVars, err := ParseEnvFile(ef)
		parsedVars, err := parseKeyValueFile(ef, emptyFn)
		if err != nil {
			return nil, err
		}
		envVariables = append(envVariables, parsedVars...)
		variables = append(variables, parsedVars...)
	}
	// parse the '-e' and '--env' after, to allow override
	envVariables = append(envVariables, override...)
	variables = append(variables, override...)

	return envVariables, nil
	return variables, nil
}

// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"}
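Reviewer note (not part of the vendored change): a sketch of the new ReadKVEnvStrings behaviour, where a key without a value in an env-file is resolved from the process environment; the file contents below are made up.

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/docker/cli/opts"
)

func main() {
	os.Setenv("TOKEN", "s3cr3t")

	// env file: TOKEN has no "=", so its value is looked up via os.LookupEnv.
	f, _ := ioutil.TempFile("", "envfile")
	defer os.Remove(f.Name())
	_, _ = f.WriteString("MODE=debug\nTOKEN\n")
	_ = f.Close()

	vars, err := opts.ReadKVEnvStrings([]string{f.Name()}, []string{"MODE=release"})
	if err != nil {
		panic(err)
	}
	fmt.Println(vars) // [MODE=debug TOKEN=s3cr3t MODE=release]
}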
15 vendor/github.com/docker/cli/opts/port.go generated vendored
@@ -9,6 +9,7 @@ import (

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/go-connections/nat"
	"github.com/sirupsen/logrus"
)

const (
@@ -49,7 +50,7 @@ func (p *PortOpt) Set(value string) error {

		switch key {
		case portOptProtocol:
			if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) {
			if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) && value != string(swarm.PortConfigProtocolSCTP) {
				return fmt.Errorf("invalid protocol value %s", value)
			}

@@ -147,17 +148,25 @@ func ConvertPortToPortConfig(
	ports := []swarm.PortConfig{}

	for _, binding := range portBindings[port] {
		hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
		if binding.HostIP != "" && binding.HostIP != "0.0.0.0" {
			logrus.Warnf("ignoring IP-address (%s:%s:%s) service will listen on '0.0.0.0'", binding.HostIP, binding.HostPort, port)
		}

		startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)

		if err != nil && binding.HostPort != "" {
			return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
		}

		for i := startHostPort; i <= endHostPort; i++ {
			ports = append(ports, swarm.PortConfig{
				//TODO Name: ?
				Protocol:      swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
				TargetPort:    uint32(port.Int()),
				PublishedPort: uint32(hostPort),
				PublishedPort: uint32(i),
				PublishMode:   swarm.PortConfigPublishModeIngress,
			})
		}
	}
	return ports, nil
}
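Reviewer note (not part of the vendored change): the hunk above makes a host-port range expand into one swarm.PortConfig per port instead of failing in ParseUint; a rough sketch of that behaviour with placeholder ports.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
	"github.com/docker/go-connections/nat"
)

func main() {
	port := nat.Port("80/tcp")
	bindings := nat.PortMap{
		port: []nat.PortBinding{{HostPort: "8080-8082"}},
	}
	configs, err := opts.ConvertPortToPortConfig(port, bindings)
	if err != nil {
		panic(err)
	}
	for _, c := range configs {
		// published 8080, 8081 and 8082 all target port 80
		fmt.Printf("published %d -> target %d (%s)\n", c.PublishedPort, c.TargetPort, c.Protocol)
	}
}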
12 vendor/github.com/docker/distribution/blobs.go generated vendored
@@ -1,15 +1,16 @@
package distribution

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go/v1"
)

var (
@@ -66,12 +67,19 @@ type Descriptor struct {
	Size int64 `json:"size,omitempty"`

	// Digest uniquely identifies the content. A byte stream can be verified
	// against against this digest.
	// against this digest.
	Digest digest.Digest `json:"digest,omitempty"`

	// URLs contains the source URLs of this content.
	URLs []string `json:"urls,omitempty"`

	// Annotations contains arbitrary metadata relating to the targeted content.
	Annotations map[string]string `json:"annotations,omitempty"`

	// Platform describes the platform which the image in the manifest runs on.
	// This should only be used when referring to a manifest.
	Platform *v1.Platform `json:"platform,omitempty"`

	// NOTE: Before adding a field here, please ensure that all
	// other options have been exhausted. Much of the type relationships
	// depend on the simplicity of this type.
85 vendor/github.com/docker/distribution/context/context.go generated vendored
@@ -1,85 +0,0 @@
package context

import (
	"sync"

	"github.com/docker/distribution/uuid"
	"golang.org/x/net/context"
)

// Context is a copy of Context from the golang.org/x/net/context package.
type Context interface {
	context.Context
}

// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
	Context
	id   string    // id of context, logged as "instance.id"
	once sync.Once // once protect generation of the id
}

func (ic *instanceContext) Value(key interface{}) interface{} {
	if key == "instance.id" {
		ic.once.Do(func() {
			// We want to lazy initialize the UUID such that we don't
			// call a random generator from the package initialization
			// code. For various reasons random could not be available
			// https://github.com/docker/distribution/issues/782
			ic.id = uuid.Generate().String()
		})
		return ic.id
	}

	return ic.Context.Value(key)
}

var background = &instanceContext{
	Context: context.Background(),
}

// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
	return background
}

// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
	return context.WithValue(parent, key, val)
}

// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
	context.Context
	m map[string]interface{}
}

// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
	mo := make(map[string]interface{}, len(m)) // make our own copy.
	for k, v := range m {
		mo[k] = v
	}

	return stringMapContext{
		Context: ctx,
		m:       mo,
	}
}

func (smc stringMapContext) Value(key interface{}) interface{} {
	if ks, ok := key.(string); ok {
		if v, ok := smc.m[ks]; ok {
			return v
		}
	}

	return smc.Context.Value(key)
}
89 vendor/github.com/docker/distribution/context/doc.go generated vendored
@@ -1,89 +0,0 @@
// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
//
// 	ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// 	ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// 	GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// 	ctx := context.Context(context.Background(), "version", version)
// 	GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// 	INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// 	GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// 	INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// 	ctx = WithRequest(ctx, req)
// 	GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// 	ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one
// can analyze call flow for a particular request with a simple grep of the
// logs.
package context
366 vendor/github.com/docker/distribution/context/http.go generated vendored
@@ -1,366 +0,0 @@
package context

import (
	"errors"
	"net"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/docker/distribution/uuid"
	"github.com/gorilla/mux"
	log "github.com/sirupsen/logrus"
)

// Common errors used with this package.
var (
	ErrNoRequestContext        = errors.New("no http request in context")
	ErrNoResponseWriterContext = errors.New("no http response in context")
)

func parseIP(ipStr string) net.IP {
	ip := net.ParseIP(ipStr)
	if ip == nil {
		log.Warnf("invalid remote IP address: %q", ipStr)
	}
	return ip
}

// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
	if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
		proxies := strings.Split(prior, ",")
		if len(proxies) > 0 {
			remoteAddr := strings.Trim(proxies[0], " ")
			if parseIP(remoteAddr) != nil {
				return remoteAddr
			}
		}
	}
	// X-Real-Ip is less supported, but worth checking in the
	// absence of X-Forwarded-For
	if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
		if parseIP(realIP) != nil {
			return realIP
		}
	}

	return r.RemoteAddr
}

// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
	addr := RemoteAddr(r)

	// Try parsing it as "IP:port"
	if ip, _, err := net.SplitHostPort(addr); err == nil {
		return ip
	}

	return addr
}

// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx Context, r *http.Request) Context {
	if ctx.Value("http.request") != nil {
		// NOTE(stevvooe): This needs to be considered a programming error. It
		// is unlikely that we'd want to have more than one request in
		// context.
		panic("only one request per context")
	}

	return &httpRequestContext{
		Context:   ctx,
		startedAt: time.Now(),
		id:        uuid.Generate().String(),
		r:         r,
	}
}

// GetRequest returns the http request in the given context. Returns
// ErrNoRequestContext if the context does not have an http request associated
// with it.
func GetRequest(ctx Context) (*http.Request, error) {
	if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
		return r, nil
	}
	return nil, ErrNoRequestContext
}

// GetRequestID attempts to resolve the current request id, if possible. An
// error is return if it is not available on the context.
func GetRequestID(ctx Context) string {
	return GetStringValue(ctx, "http.request.id")
}

// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		irwCN := &instrumentedResponseWriterCN{
			instrumentedResponseWriter: instrumentedResponseWriter{
				ResponseWriter: w,
				Context:        ctx,
			},
			CloseNotifier: closeNotifier,
		}

		return irwCN, irwCN
	}

	irw := instrumentedResponseWriter{
		ResponseWriter: w,
		Context:        ctx,
	}
	return &irw, &irw
}

// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
	v := ctx.Value("http.response")

	rw, ok := v.(http.ResponseWriter)
	if !ok || rw == nil {
		return nil, ErrNoResponseWriterContext
	}

	return rw, nil
}

// getVarsFromRequest let's us change request vars implementation for testing
// and maybe future changes.
var getVarsFromRequest = mux.Vars

// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx Context, r *http.Request) Context {
	return &muxVarsContext{
		Context: ctx,
		vars:    getVarsFromRequest(r),
	}
}

// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx Context) Logger {
	return GetLogger(ctx,
		"http.request.id",
		"http.request.method",
		"http.request.host",
		"http.request.uri",
		"http.request.referer",
		"http.request.useragent",
		"http.request.remoteaddr",
		"http.request.contenttype")
}

// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx Context) Logger {
	l := getLogrusLogger(ctx,
		"http.response.written",
		"http.response.status",
		"http.response.contenttype")

	duration := Since(ctx, "http.request.startedat")

	if duration > 0 {
		l = l.WithField("http.response.duration", duration.String())
	}

	return l
}

// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
	Context

	startedAt time.Time
	id        string
	r         *http.Request
}

// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "request". For other components, access them as
// "request.<component>". For example, r.RequestURI
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.request" {
			return ctx.r
		}

		if !strings.HasPrefix(keyStr, "http.request.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		switch parts[2] {
		case "uri":
			return ctx.r.RequestURI
		case "remoteaddr":
			return RemoteAddr(ctx.r)
		case "method":
			return ctx.r.Method
		case "host":
			return ctx.r.Host
		case "referer":
			referer := ctx.r.Referer()
			if referer != "" {
				return referer
			}
		case "useragent":
			return ctx.r.UserAgent()
		case "id":
			return ctx.id
		case "startedat":
			return ctx.startedAt
		case "contenttype":
			ct := ctx.r.Header.Get("Content-Type")
			if ct != "" {
				return ct
			}
		}
	}

fallback:
	return ctx.Context.Value(key)
}

type muxVarsContext struct {
	Context
	vars map[string]string
}

func (ctx *muxVarsContext) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "vars" {
			return ctx.vars
		}

		if strings.HasPrefix(keyStr, "vars.") {
			keyStr = strings.TrimPrefix(keyStr, "vars.")
		}

		if v, ok := ctx.vars[keyStr]; ok {
			return v
		}
	}

	return ctx.Context.Value(key)
}

// instrumentedResponseWriterCN provides response writer information in a
// context. It implements http.CloseNotifier so that users can detect
// early disconnects.
type instrumentedResponseWriterCN struct {
	instrumentedResponseWriter
	http.CloseNotifier
}

// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
	http.ResponseWriter
	Context

	mu      sync.Mutex
	status  int
	written int64
}

func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
	n, err = irw.ResponseWriter.Write(p)

	irw.mu.Lock()
	irw.written += int64(n)

	// Guess the likely status if not set.
	if irw.status == 0 {
		irw.status = http.StatusOK
	}

	irw.mu.Unlock()

	return
}

func (irw *instrumentedResponseWriter) WriteHeader(status int) {
	irw.ResponseWriter.WriteHeader(status)

	irw.mu.Lock()
	irw.status = status
	irw.mu.Unlock()
}

func (irw *instrumentedResponseWriter) Flush() {
	if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
		flusher.Flush()
	}
}

func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.response" {
			return irw
		}

		if !strings.HasPrefix(keyStr, "http.response.") {
			goto fallback
		}

		parts := strings.Split(keyStr, ".")

		if len(parts) != 3 {
			goto fallback
		}

		irw.mu.Lock()
		defer irw.mu.Unlock()

		switch parts[2] {
		case "written":
			return irw.written
		case "status":
			return irw.status
		case "contenttype":
			contentType := irw.Header().Get("Content-Type")
			if contentType != "" {
				return contentType
			}
		}
	}

fallback:
	return irw.Context.Value(key)
}

func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
	if keyStr, ok := key.(string); ok {
		if keyStr == "http.response" {
			return irw
		}
	}

	return irw.instrumentedResponseWriter.Value(key)
}
116 vendor/github.com/docker/distribution/context/logger.go generated vendored
@@ -1,116 +0,0 @@
package context

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"runtime"
)

// Logger provides a leveled-logging interface.
type Logger interface {
	// standard logger methods
	Print(args ...interface{})
	Printf(format string, args ...interface{})
	Println(args ...interface{})

	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})

	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Panicln(args ...interface{})

	// Leveled methods, from logrus
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Debugln(args ...interface{})

	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Errorln(args ...interface{})

	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Infoln(args ...interface{})

	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Warnln(args ...interface{})
}

// WithLogger creates a new context with provided logger.
func WithLogger(ctx Context, logger Logger) Context {
	return WithValue(ctx, "logger", logger)
}

// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}

// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
	// must convert from interface{} -> interface{} to string -> interface{} for logrus.
	lfields := make(logrus.Fields, len(fields))
	for key, value := range fields {
		lfields[fmt.Sprint(key)] = value
	}

	return getLogrusLogger(ctx, keys...).WithFields(lfields)
}

// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// its recommended that a String method is implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...)
}

// GetLogrusLogger returns the logrus logger for the context. If one more keys
// are provided, they will be resolved on the context and included in the
// logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
	var logger *logrus.Entry

	// Get a logger, if it is present.
	loggerInterface := ctx.Value("logger")
	if loggerInterface != nil {
		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
			logger = lgr
		}
	}

	if logger == nil {
		fields := logrus.Fields{}

		// Fill in the instance id, if we have it.
		instanceID := ctx.Value("instance.id")
		if instanceID != nil {
			fields["instance.id"] = instanceID
		}

		fields["go.version"] = runtime.Version()
		// If no logger is found, just return the standard logger.
		logger = logrus.StandardLogger().WithFields(fields)
	}

	fields := logrus.Fields{}
	for _, key := range keys {
		v := ctx.Value(key)
		if v != nil {
			fields[fmt.Sprint(key)] = v
		}
	}

	return logger.WithFields(fields)
}
104 vendor/github.com/docker/distribution/context/trace.go generated vendored
@@ -1,104 +0,0 @@
package context

import (
	"runtime"
	"time"

	"github.com/docker/distribution/uuid"
)

// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// 	func timedOperation(ctx Context) {
// 		ctx, done := WithTrace(ctx)
// 		defer done("this will be the log message")
// 		// ... function body ...
// 	}
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// 	INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
	if ctx == nil {
		ctx = Background()
	}

	pc, file, line, _ := runtime.Caller(1)
	f := runtime.FuncForPC(pc)
	ctx = &traced{
		Context: ctx,
		id:      uuid.Generate().String(),
		start:   time.Now(),
		parent:  GetStringValue(ctx, "trace.id"),
		fnname:  f.Name(),
		file:    file,
		line:    line,
	}

	return ctx, func(format string, a ...interface{}) {
		GetLogger(ctx,
			"trace.duration",
			"trace.id",
			"trace.parent.id",
			"trace.func",
			"trace.file",
			"trace.line").
			Debugf(format, a...)
	}
}

// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
	Context
	id     string
	parent string
	start  time.Time
	fnname string
	file   string
	line   int
}

func (ts *traced) Value(key interface{}) interface{} {
	switch key {
	case "trace.start":
		return ts.start
	case "trace.duration":
		return time.Since(ts.start)
	case "trace.id":
		return ts.id
	case "trace.parent.id":
		if ts.parent == "" {
			return nil // must return nil to signal no parent.
		}

		return ts.parent
	case "trace.func":
		return ts.fnname
	case "trace.file":
		return ts.file
	case "trace.line":
		return ts.line
	}

	return ts.Context.Value(key)
}
24 vendor/github.com/docker/distribution/context/util.go generated vendored
@@ -1,24 +0,0 @@
package context

import (
	"time"
)

// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx Context, key interface{}) time.Duration {
	if startedAt, ok := ctx.Value(key).(time.Time); ok {
		return time.Since(startedAt)
	}
	return 0
}

// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx Context, key interface{}) (value string) {
	if valuev, ok := ctx.Value(key).(string); ok {
		value = valuev
	}
	return value
}
16 vendor/github.com/docker/distribution/context/version.go generated vendored
@@ -1,16 +0,0 @@
package context

// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx Context, version string) Context {
	ctx = WithValue(ctx, "version", version)
	// push a new logger onto the stack
	return WithLogger(ctx, GetLogger(ctx, "version"))
}

// GetVersion returns the application version from the context. An empty
// string may returned if the version was not set on the context.
func GetVersion(ctx Context) string {
	return GetStringValue(ctx, "version")
}
4 vendor/github.com/docker/distribution/errors.go generated vendored
@@ -20,6 +20,10 @@ var ErrManifestNotModified = errors.New("manifest not modified")
// performed
var ErrUnsupported = errors.New("operation unsupported")

// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
// manifest but the registry is configured to reject it
var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")

// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
	Tag string
2 vendor/github.com/docker/distribution/manifests.go generated vendored
@@ -1,10 +1,10 @@
package distribution

import (
	"context"
	"fmt"
	"mime"

	"github.com/docker/distribution/context"
	"github.com/opencontainers/go-digest"
)

13 vendor/github.com/docker/distribution/metrics/prometheus.go generated vendored Normal file
@@ -0,0 +1,13 @@
package metrics

import "github.com/docker/go-metrics"

const (
	// NamespacePrefix is the namespace of prometheus metrics
	NamespacePrefix = "registry"
)

var (
	// StorageNamespace is the prometheus namespace of blob/cache related operations
	StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
)
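Reviewer note (not part of the vendored change): a rough sketch of how such a namespace and labeled counter are typically registered and exposed with docker/go-metrics; the Register and Handler calls below are assumptions about that library, not something introduced by this diff.

package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	ns := metrics.NewNamespace("registry", "storage", nil)
	cacheCount := ns.NewLabeledCounter("cache", "The number of cache request received", "type")
	metrics.Register(ns)

	cacheCount.WithValues("Request").Inc(1) // same pattern the cached blob statter uses below

	http.Handle("/metrics", metrics.Handler())
	_ = http.ListenAndServe(":5001", nil)
}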
23 vendor/github.com/docker/distribution/registry.go generated vendored
@@ -1,7 +1,8 @@
package distribution

import (
	"github.com/docker/distribution/context"
	"context"

	"github.com/docker/distribution/reference"
)

@@ -53,6 +54,11 @@ type RepositoryEnumerator interface {
	Enumerate(ctx context.Context, ingester func(string) error) error
}

// RepositoryRemover removes given repository
type RepositoryRemover interface {
	Remove(ctx context.Context, name reference.Named) error
}

// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
	Apply(ManifestService) error
@@ -72,6 +78,21 @@ func (o WithTagOption) Apply(m ManifestService) error {
	return nil
}

// WithManifestMediaTypes lists the media types the client wishes
// the server to provide.
func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
	return WithManifestMediaTypesOption{mediaTypes}
}

// WithManifestMediaTypesOption holds a list of accepted media types
type WithManifestMediaTypesOption struct{ MediaTypes []string }

// Apply conforms to the ManifestServiceOption interface
func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
	// no implementation
	return nil
}

// Repository is a named collection of manifests and layers.
type Repository interface {
	// Named returns the name of the repository.
6 vendor/github.com/docker/distribution/registry/api/errcode/handler.go generated vendored
@@ -36,9 +36,5 @@ func ServeJSON(w http.ResponseWriter, err error) error {

	w.WriteHeader(sc)

	if err := json.NewEncoder(w).Encode(err); err != nil {
		return err
	}

	return nil
	return json.NewEncoder(w).Encode(err)
}
9 vendor/github.com/docker/distribution/registry/api/v2/routes.go generated vendored
@@ -14,15 +14,6 @@ const (
	RouteNameCatalog = "catalog"
)

var allEndpoints = []string{
	RouteNameManifest,
	RouteNameCatalog,
	RouteNameTags,
	RouteNameBlob,
	RouteNameBlobUpload,
	RouteNameBlobUploadChunk,
}

// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go generated vendored
@@ -45,13 +45,13 @@ type Manager interface {
// to a backend.
func NewSimpleManager() Manager {
	return &simpleManager{
		Challanges: make(map[string][]Challenge),
		Challenges: make(map[string][]Challenge),
	}
}

type simpleManager struct {
	sync.RWMutex
	Challanges map[string][]Challenge
	Challenges map[string][]Challenge
}

func normalizeURL(endpoint *url.URL) {
@@ -64,7 +64,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {

	m.RLock()
	defer m.RUnlock()
	challenges := m.Challanges[endpoint.String()]
	challenges := m.Challenges[endpoint.String()]
	return challenges, nil
}

@@ -82,7 +82,7 @@ func (m *simpleManager) AddResponse(resp *http.Response) error {

	m.Lock()
	defer m.Unlock()
	m.Challanges[urlCopy.String()] = challenges
	m.Challenges[urlCopy.String()] = challenges
	return nil
}
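Reviewer note (not part of the vendored change): a sketch of the (now consistently spelled) challenge manager in use; the registry URL is a placeholder and error handling is elided.

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
	manager := challenge.NewSimpleManager()

	// Probe the registry's /v2/ endpoint and record its WWW-Authenticate challenges.
	if resp, err := http.Get("https://registry-1.docker.io/v2/"); err == nil {
		defer resp.Body.Close()
		_ = manager.AddResponse(resp)
	}

	endpoint, _ := url.Parse("https://registry-1.docker.io/v2/")
	challenges, _ := manager.GetChallenges(*endpoint)
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters)
	}
}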
35 vendor/github.com/docker/distribution/registry/client/auth/session.go generated vendored
@@ -13,7 +13,6 @@ import (
	"github.com/docker/distribution/registry/client"
	"github.com/docker/distribution/registry/client/auth/challenge"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/sirupsen/logrus"
)

var (
@@ -69,7 +68,6 @@ func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler)
type endpointAuthorizer struct {
	challenges challenge.Manager
	handlers   []AuthenticationHandler
	transport  http.RoundTripper
}

func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
@@ -122,7 +120,6 @@ type clock interface {
}

type tokenHandler struct {
	header    http.Header
	creds     CredentialStore
	transport http.RoundTripper
	clock     clock
@@ -135,6 +132,8 @@ type tokenHandler struct {
	tokenLock       sync.Mutex
	tokenCache      string
	tokenExpiration time.Time

	logger Logger
}

// Scope is a type which is serializable to a string
@@ -176,6 +175,18 @@ func (rs RegistryScope) String() string {
	return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
}

// Logger defines the injectable logging interface, used on TokenHandlers.
type Logger interface {
	Debugf(format string, args ...interface{})
}

func logDebugf(logger Logger, format string, args ...interface{}) {
	if logger == nil {
		return
	}
	logger.Debugf(format, args...)
}

// TokenHandlerOptions is used to configure a new token handler
type TokenHandlerOptions struct {
	Transport http.RoundTripper
@@ -185,6 +196,7 @@ type TokenHandlerOptions struct {
	ForceOAuth bool
	ClientID   string
	Scopes     []Scope
	Logger     Logger
}

// An implementation of clock for providing real time data.
@@ -220,6 +232,7 @@ func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandl
		clientID:  options.ClientID,
		scopes:    options.Scopes,
		clock:     realClock{},
		logger:    options.Logger,
	}

	return handler
@@ -264,6 +277,9 @@ func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...s
	}
	var addedScopes bool
	for _, scope := range additionalScopes {
		if hasScope(scopes, scope) {
			continue
		}
		scopes = append(scopes, scope)
		addedScopes = true
	}

@@ -287,6 +303,15 @@ func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...s
	return th.tokenCache, nil
}

func hasScope(scopes []string, scope string) bool {
	for _, s := range scopes {
		if s == scope {
			return true
		}
	}
	return false
}

type postTokenResponse struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
@@ -348,7 +373,7 @@ func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, servic
	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
		// The default/minimum lifetime.
		tr.ExpiresIn = minimumTokenLifetimeSeconds
		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
		logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
	}

	if tr.IssuedAt.IsZero() {
@@ -439,7 +464,7 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string,
	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
		// The default/minimum lifetime.
		tr.ExpiresIn = minimumTokenLifetimeSeconds
		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
		logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
	}

	if tr.IssuedAt.IsZero() {
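Reviewer note (not part of the vendored change): a sketch of wiring the new injectable Logger through TokenHandlerOptions instead of the package-level logrus calls this diff removes; repository name and scopes are placeholders.

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/auth/challenge"
	"github.com/sirupsen/logrus"
)

func main() {
	manager := challenge.NewSimpleManager()

	handler := auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{
		Transport: http.DefaultTransport,
		Scopes: []auth.Scope{auth.RepositoryScope{
			Repository: "library/alpine",
			Actions:    []string{"pull"},
		}},
		// Any Debugf-capable logger satisfies the new auth.Logger interface.
		Logger: logrus.StandardLogger(),
	})

	_ = auth.NewAuthorizer(manager, handler)
}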
2 vendor/github.com/docker/distribution/registry/client/blob_writer.go generated vendored
@@ -2,6 +2,7 @@ package client

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
@@ -9,7 +10,6 @@ import (
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

type httpBlobUpload struct {
48
vendor/github.com/docker/distribution/registry/client/repository.go
generated
vendored
48
vendor/github.com/docker/distribution/registry/client/repository.go
generated
vendored
|
@ -2,6 +2,7 @@ package client
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -14,7 +15,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
|
@ -62,7 +62,7 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
|
|||
}
|
||||
|
||||
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
|
||||
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
|
||||
func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
|
||||
ub, err := v2.NewURLBuilderFromString(baseURL, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -77,14 +77,12 @@ func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTrippe
|
|||
return ®istry{
|
||||
client: client,
|
||||
ub: ub,
|
||||
context: ctx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type registry struct {
|
||||
client *http.Client
|
||||
ub *v2.URLBuilder
|
||||
context context.Context
|
||||
}
|
||||
|
||||
// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
|
||||
|
@ -133,7 +131,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
|
|||
}
|
||||
|
||||
// NewRepository creates a new Repository for the given repository name and base URL.
|
||||
func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
|
||||
func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
|
||||
ub, err := v2.NewURLBuilderFromString(baseURL, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -149,14 +147,12 @@ func NewRepository(ctx context.Context, name reference.Named, baseURL string, tr
|
|||
client: client,
|
||||
ub: ub,
|
||||
name: name,
|
||||
context: ctx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type repository struct {
|
||||
client *http.Client
|
||||
ub *v2.URLBuilder
|
||||
context context.Context
|
||||
name reference.Named
|
||||
}
|
||||
|
||||
|
@ -192,7 +188,6 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService {
|
|||
return &tags{
|
||||
client: r.client,
|
||||
ub: r.ub,
|
||||
context: r.context,
|
||||
name: r.Named(),
|
||||
}
|
||||
}
|
||||
|
@ -201,7 +196,6 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService {
|
|||
type tags struct {
|
||||
client *http.Client
|
||||
ub *v2.URLBuilder
|
||||
context context.Context
|
||||
name reference.Named
|
||||
}
|
||||
|
||||
|
@ -209,13 +203,18 @@ type tags struct {
|
|||
func (t *tags) All(ctx context.Context) ([]string, error) {
|
||||
var tags []string
|
||||
|
||||
u, err := t.ub.BuildTagsURL(t.name)
|
||||
listURLStr, err := t.ub.BuildTagsURL(t.name)
|
||||
if err != nil {
|
||||
return tags, err
|
||||
}
|
||||
|
||||
listURL, err := url.Parse(listURLStr)
|
||||
if err != nil {
|
||||
return tags, err
|
||||
}
|
||||
|
||||
for {
|
||||
resp, err := t.client.Get(u)
|
||||
resp, err := t.client.Get(listURL.String())
|
||||
if err != nil {
|
||||
return tags, err
|
||||
}
|
||||
|
@ -235,7 +234,13 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
|
|||
}
|
||||
tags = append(tags, tagsResponse.Tags...)
|
||||
if link := resp.Header.Get("Link"); link != "" {
|
||||
u = strings.Trim(strings.Split(link, ";")[0], "<>")
|
||||
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
|
||||
linkURL, err := url.Parse(linkURLStr)
|
||||
if err != nil {
|
||||
return tags, err
|
||||
}
|
||||
|
||||
listURL = listURL.ResolveReference(linkURL)
|
||||
} else {
|
||||
return tags, nil
|
||||
}
|
||||
|
@ -321,7 +326,8 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er
|
|||
defer resp.Body.Close()
|
||||
|
||||
switch {
|
||||
case resp.StatusCode >= 200 && resp.StatusCode < 400:
|
||||
case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
|
||||
// if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
|
||||
return descriptorFromResponse(resp)
|
||||
default:
|
||||
// if the response is an error - there will be no body to decode.
|
||||
|
@@ -421,18 +427,22 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
         ref         reference.Named
         err         error
         contentDgst *digest.Digest
+        mediaTypes  []string
     )
 
     for _, option := range options {
-        if opt, ok := option.(distribution.WithTagOption); ok {
+        switch opt := option.(type) {
+        case distribution.WithTagOption:
             digestOrTag = opt.Tag
             ref, err = reference.WithTag(ms.name, opt.Tag)
             if err != nil {
                 return nil, err
             }
-        } else if opt, ok := option.(contentDigestOption); ok {
+        case contentDigestOption:
             contentDgst = opt.digest
-        } else {
+        case distribution.WithManifestMediaTypesOption:
+            mediaTypes = opt.MediaTypes
+        default:
             err := option.Apply(ms)
             if err != nil {
                 return nil, err
@@ -448,6 +458,10 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
         }
     }
 
+    if len(mediaTypes) == 0 {
+        mediaTypes = distribution.ManifestMediaTypes()
+    }
+
     u, err := ms.ub.BuildManifestURL(ref)
     if err != nil {
         return nil, err
@@ -458,7 +472,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
         return nil, err
     }
 
-    for _, t := range distribution.ManifestMediaTypes() {
+    for _, t := range mediaTypes {
         req.Header.Add("Accept", t)
     }
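With the hunks above, manifests.Get honours a WithManifestMediaTypesOption and only falls back to the full distribution.ManifestMediaTypes() set when no media types are supplied. A hedged caller-side sketch, assuming the accompanying distribution.WithManifestMediaTypes helper from the same release (the package name, helper name, and digest value are hypothetical; ms would come from repo.Manifests(ctx)):

package example

import (
    "context"

    "github.com/docker/distribution"
    "github.com/docker/distribution/manifest/manifestlist"
    "github.com/docker/distribution/manifest/schema2"
    "github.com/opencontainers/go-digest"
)

// fetchManifest limits the Accept header to schema2 and manifest-list media
// types instead of the default distribution.ManifestMediaTypes() set.
func fetchManifest(ctx context.Context, ms distribution.ManifestService, dgst digest.Digest) (distribution.Manifest, error) {
    return ms.Get(ctx, dgst, distribution.WithManifestMediaTypes([]string{
        schema2.MediaTypeManifest,
        manifestlist.MediaTypeManifestList,
    }))
}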
9 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go generated vendored
@@ -5,7 +5,6 @@ import (
     "fmt"
     "io"
     "net/http"
-    "os"
     "regexp"
     "strconv"
 )
@@ -97,7 +96,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 
     lastReaderOffset := hrs.readerOffset
 
-    if whence == os.SEEK_SET && hrs.rc == nil {
+    if whence == io.SeekStart && hrs.rc == nil {
         // If no request has been made yet, and we are seeking to an
         // absolute position, set the read offset as well to avoid an
         // unnecessary request.
@@ -113,14 +112,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
     newOffset := hrs.seekOffset
 
     switch whence {
-    case os.SEEK_CUR:
+    case io.SeekCurrent:
         newOffset += offset
-    case os.SEEK_END:
+    case io.SeekEnd:
         if hrs.size < 0 {
             return 0, errors.New("content length not known")
         }
         newOffset = hrs.size + offset
-    case os.SEEK_SET:
+    case io.SeekStart:
         newOffset = offset
     }
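The reader above switches from the deprecated os.SEEK_SET/os.SEEK_CUR/os.SEEK_END constants to io.SeekStart/io.SeekCurrent/io.SeekEnd, which carry the same values (0, 1, 2) and let the os import be dropped. A small standalone illustration of the new constants:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    r := strings.NewReader("0123456789")

    // io.SeekEnd seeks relative to the end of the stream,
    // exactly like the old os.SEEK_END.
    pos, _ := r.Seek(-3, io.SeekEnd)
    fmt.Println(pos) // 7

    buf := make([]byte, 3)
    n, _ := r.Read(buf)
    fmt.Println(string(buf[:n])) // "789"
}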
@@ -1,10 +1,11 @@
 package cache
 
 import (
-    "github.com/docker/distribution/context"
-    "github.com/opencontainers/go-digest"
+    "context"
 
     "github.com/docker/distribution"
+    prometheus "github.com/docker/distribution/metrics"
+    "github.com/opencontainers/go-digest"
 )
 
 // Metrics is used to hold metric counters
@@ -16,12 +17,20 @@ type Metrics struct {
     Misses uint64
 }
 
+// Logger can be provided on the MetricsTracker to log errors.
+//
+// Usually, this is just a proxy to dcontext.GetLogger.
+type Logger interface {
+    Errorf(format string, args ...interface{})
+}
+
 // MetricsTracker represents a metric tracker
 // which simply counts the number of hits and misses.
 type MetricsTracker interface {
     Hit()
     Miss()
     Metrics() Metrics
+    Logger(context.Context) Logger
 }
 
 type cachedBlobStatter struct {
@@ -30,6 +39,11 @@ type cachedBlobStatter struct {
     tracker MetricsTracker
 }
 
+var (
+    // cacheCount is the number of total cache request received/hits/misses
+    cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
+)
+
 // NewCachedBlobStatter creates a new statter which prefers a cache and
 // falls back to a backend.
 func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
|
@ -50,20 +64,22 @@ func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, b
|
|||
}
|
||||
|
||||
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
||||
cacheCount.WithValues("Request").Inc(1)
|
||||
desc, err := cbds.cache.Stat(ctx, dgst)
|
||||
if err != nil {
|
||||
if err != distribution.ErrBlobUnknown {
|
||||
context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
|
||||
logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err)
|
||||
}
|
||||
|
||||
goto fallback
|
||||
}
|
||||
|
||||
cacheCount.WithValues("Hit").Inc(1)
|
||||
if cbds.tracker != nil {
|
||||
cbds.tracker.Hit()
|
||||
}
|
||||
return desc, nil
|
||||
fallback:
|
||||
cacheCount.WithValues("Miss").Inc(1)
|
||||
if cbds.tracker != nil {
|
||||
cbds.tracker.Miss()
|
||||
}
|
||||
|
@@ -73,7 +89,7 @@ fallback:
     }
 
     if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
-        context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+        logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
     }
 
     return desc, err
@@ -95,7 +111,19 @@ func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) er
 
 func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
     if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
-        context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+        logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err)
     }
     return nil
 }
+
+func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) {
+    if tracker == nil {
+        return
+    }
+
+    logger := tracker.Logger(ctx)
+    if logger == nil {
+        return
+    }
+    logger.Errorf(format, args...)
+}
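With this change, MetricsTracker gains a Logger(context.Context) method and cache errors are routed through the new logErrorf helper instead of the old distribution context logger. A hedged sketch of what an implementation could look like (the type and package names are invented; only the interface shapes come from this diff, and the Hits field of cache.Metrics is assumed alongside the Misses field shown above):

package example

import (
    "context"
    "log"
    "sync/atomic"

    "github.com/docker/distribution/registry/storage/cache"
)

// stdTracker is a hypothetical MetricsTracker that counts hits and misses
// and logs through the standard library logger.
type stdTracker struct {
    hits, misses uint64
}

var _ cache.MetricsTracker = (*stdTracker)(nil)

func (t *stdTracker) Hit()  { atomic.AddUint64(&t.hits, 1) }
func (t *stdTracker) Miss() { atomic.AddUint64(&t.misses, 1) }

func (t *stdTracker) Metrics() cache.Metrics {
    return cache.Metrics{
        Hits:   atomic.LoadUint64(&t.hits),   // Hits field assumed, not shown in this hunk
        Misses: atomic.LoadUint64(&t.misses),
    }
}

// Logger satisfies the new interface method; this example ignores the
// context and proxies to the standard library logger.
func (t *stdTracker) Logger(ctx context.Context) cache.Logger {
    return stdLogger{}
}

type stdLogger struct{}

func (stdLogger) Errorf(format string, args ...interface{}) {
    log.Printf("ERROR: "+format, args...)
}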
2 vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go generated vendored
@@ -1,10 +1,10 @@
 package memory
 
 import (
+    "context"
     "sync"
 
     "github.com/docker/distribution"
-    "github.com/docker/distribution/context"
     "github.com/docker/distribution/reference"
     "github.com/docker/distribution/registry/storage/cache"
     "github.com/opencontainers/go-digest"
2 vendor/github.com/docker/distribution/tags.go generated vendored
@@ -1,7 +1,7 @@
 package distribution
 
 import (
-    "github.com/docker/distribution/context"
+    "context"
 )
 
 // TagService provides access to information about tagged objects.
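These last hunks (here and in memory.go above) swap github.com/docker/distribution/context for the standard library context package, so callers can pass ordinary context values with deadlines and cancellation. A minimal, hypothetical example against the TagService interface (package and function names are invented for illustration):

package example

import (
    "context"
    "time"

    "github.com/docker/distribution"
)

// listTags shows a caller passing a plain standard-library context,
// including a timeout, to the vendored TagService.
func listTags(ts distribution.TagService) ([]string, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    return ts.All(ctx)
}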
Some files were not shown because too many files have changed in this diff.