fix: update lego.

Ludovic Fernandez 2019-03-27 11:18:04 +01:00 committed by Traefiker Bot
parent b893374dc1
commit c17de070fb
432 changed files with 182 additions and 259514 deletions

Gopkg.lock (generated)

@@ -9,22 +9,6 @@
   revision = "056a55f54a6cc77b440b31a56a5e7c3982d32811"
   version = "v0.22.0"
-
-[[projects]]
-  branch = "master"
-  digest = "1:24afd6a7be85997c981693ddcc5f7b37fd3ed5cb119901e14737b9635b944cfb"
-  name = "github.com/ArthurHlt/go-eureka-client"
-  packages = ["eureka"]
-  pruneopts = "NUT"
-  revision = "9d0a49cbd39aa3634ae1977e9f519a262b10adaf"
-
-[[projects]]
-  branch = "master"
-  digest = "1:922aa650254d0678003e182aeb75f6b094a0bfc40a87d198859d511cdafcfa8a"
-  name = "github.com/ArthurHlt/gominlog"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "72eebf980f467d3ab3a8b4ddf660f664911ce519"

 [[projects]]
   digest = "1:59f6c2fd10ad014d2907eaa48a19070d5e7af35624328a4fbbf93b7b1c4a19e8"
   name = "github.com/Azure/azure-sdk-for-go"
@@ -70,17 +54,6 @@
   pruneopts = "NUT"
   revision = "a368813c5e648fee92e5f6c30e3944ff9d5e8895"
-
-[[projects]]
-  branch = "master"
-  digest = "1:28e560d27a06cbebacd86531439aafa31c6da990607ca5a085d0005af325697f"
-  name = "github.com/BurntSushi/ty"
-  packages = [
-    ".",
-    "fun",
-  ]
-  pruneopts = "NUT"
-  revision = "6add9cd6ad42d389d6ead1dde60b4ad71e46fd74"

 [[projects]]
   digest = "1:ab7fee312bbdc8070d0325d841de8704cc78bf032b076200f1458659b74b8ed6"
   name = "github.com/JamesClonk/vultr"
@@ -161,15 +134,11 @@

 [[projects]]
   branch = "master"
-  digest = "1:d4ad87edcd6f6b6cfd1bfe8da6dd090e00901601c9270aa2928842402a361475"
+  digest = "1:28be1959f81e9a6dec3058768a4c4535cf73fcd6e171d21688ad0a7fdf49d43a"
   name = "github.com/abronan/valkeyrie"
   packages = [
     ".",
     "store",
-    "store/boltdb",
-    "store/consul",
-    "store/etcd/v3",
-    "store/zookeeper",
   ]
   pruneopts = "NUT"
   revision = "063d875e3c5fd734fa2aa12fac83829f62acfc70"
@@ -231,7 +200,7 @@
   revision = "48572f11356f1843b694f21a290d4f1006bc5e47"

 [[projects]]
-  digest = "1:bfb036834a43e76abd318f0db39b0bbec6f7865680c1e443475c0297250a89ed"
+  digest = "1:9991a3fc14d9de52a55cdb5a5f1f237140684eab050251ecfaaafa239a8ab5c9"
   name = "github.com/aws/aws-sdk-go"
   packages = [
     "aws",
@@ -254,7 +223,6 @@
     "internal/sdkrand",
     "internal/shareddefaults",
     "private/protocol",
-    "private/protocol/ec2query",
     "private/protocol/json/jsonutil",
     "private/protocol/jsonrpc",
     "private/protocol/query",
@@ -262,11 +230,6 @@
     "private/protocol/rest",
     "private/protocol/restxml",
     "private/protocol/xml/xmlutil",
-    "service/dynamodb",
-    "service/dynamodb/dynamodbattribute",
-    "service/dynamodb/dynamodbiface",
-    "service/ec2",
-    "service/ecs",
     "service/lightsail",
     "service/route53",
     "service/sts",
@@ -350,29 +313,6 @@
   revision = "7a9987c3a6d46be84e141a5c3191347ec10af17d"
   version = "v3.1.2"
-
-[[projects]]
-  digest = "1:4e9c9d51ef5c98f2a71d6fa56a22f0bb8a4463656f9591ba24ebff2c21bcbd1f"
-  name = "github.com/coreos/bbolt"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "32c383e75ce054674c53b5a07e55de85332aee14"
-
-[[projects]]
-  digest = "1:f07fcc19d52f5d9f559573c6b625142bc4b018fd275029c020d06d2bad6f6c8c"
-  name = "github.com/coreos/etcd"
-  packages = [
-    "auth/authpb",
-    "clientv3",
-    "clientv3/concurrency",
-    "etcdserver/api/v3rpc/rpctypes",
-    "etcdserver/etcdserverpb",
-    "mvcc/mvccpb",
-    "pkg/types",
-  ]
-  pruneopts = "NUT"
-  revision = "70c8726202dd91e482fb4029fd14af1d4ed1d5af"
-  version = "v3.3.5"

 [[projects]]
   digest = "1:fa91847d50d3f656fc2d2d608b9749b97d77528e8988ad8001f957640545e91e"
   name = "github.com/coreos/go-systemd"
@@ -543,15 +483,6 @@
   pruneopts = "NUT"
   revision = "9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1"
-
-[[projects]]
-  branch = "master"
-  digest = "1:2de676f0b5c5d051ae4343503760069de753c995a3cb3b01544a4924c65a93aa"
-  name = "github.com/docker/leadership"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "a2e096d9fe0af5b4c37dd37aea719bc9c2e5eec6"
-  source = "github.com/containous/leadership"

 [[projects]]
   branch = "master"
   digest = "1:9b26bdc6b9952f728f61f510a48875c38974591c69b0afa77dcfe466c6162e9e"
@@ -635,13 +566,6 @@
   revision = "0a91ac8209d6a805f259ff881d0c2654221d0346"
   version = "v0.14.3"
-
-[[projects]]
-  digest = "1:b0d5e98ac0f0a509eb320f542e748582d637aae09e74538212e9712d1e71064b"
-  name = "github.com/fatih/color"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "62e9147c64a1ed519147b62a56a14e83e2be02c1"

 [[projects]]
   digest = "1:aa3ed0a71c4e66e4ae6486bf97a3f4cab28edc78df2e50c5ad01dc7d91604b88"
   name = "github.com/fatih/structs"
@@ -673,8 +597,7 @@
   revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee"

 [[projects]]
-  branch = "master"
-  digest = "1:7dcfb91047873eb70b3a20ed5c400c4d05d9c1e73c9dac0e71ec0d44589d208a"
+  digest = "1:a04af13190b67ff69cf8fcd79ee133a24c4a7a900cacbc296261dd43f3fbde5c"
   name = "github.com/go-acme/lego"
   packages = [
     "acme",
@@ -712,6 +635,7 @@
     "providers/dns/dnsmadeeasy",
     "providers/dns/dnsmadeeasy/internal",
     "providers/dns/dnspod",
+    "providers/dns/dode",
     "providers/dns/dreamhost",
     "providers/dns/duckdns",
     "providers/dns/dyn",
@@ -758,7 +682,8 @@
     "registration",
   ]
   pruneopts = "NUT"
-  revision = "0c87df143e630a1c50b4c36c8fbdda6cd993ebba"
+  revision = "aaecc1ca7254190b71c5f01f57ee3bb6701bc937"
+  version = "v2.4.0"

 [[projects]]
   branch = "fork-containous"
@@ -821,12 +746,10 @@
   version = "v1.5.4"

 [[projects]]
-  digest = "1:b518b9be1fc76244e246afe09113e3dd6246073b444787d30883877b82a0b90d"
+  digest = "1:6689652ec1f6e30455551da19c707f2bfac75e4df5c7bbe3f0ad7b49b9aa2cfc"
   name = "github.com/gogo/protobuf"
   packages = [
-    "gogoproto",
     "proto",
-    "protoc-gen-gogo/descriptor",
     "sortkeys",
   ]
   pruneopts = "NUT"
@@ -971,29 +894,6 @@
   pruneopts = "NUT"
   revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
-
-[[projects]]
-  digest = "1:c3f14b698c0f5c5729896489f4b526f519d1d2522e697d63f532901d0e183dff"
-  name = "github.com/hashicorp/consul"
-  packages = ["api"]
-  pruneopts = "NUT"
-  revision = "9a494b5fb9c86180a5702e29c485df1507a47198"
-  version = "v1.0.6"
-
-[[projects]]
-  digest = "1:7b699584752575e81e3f4e8b00cfb3e5d6fa5419d5d212ef925e02c798847464"
-  name = "github.com/hashicorp/go-cleanhttp"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "3573b8b52aa7b37b9358d966a898feb387f62437"
-
-[[projects]]
-  branch = "master"
-  digest = "1:cdb5ce76cd7af19e3d2d5ba9b6458a2ee804f0d376711215dd3df5f51100d423"
-  name = "github.com/hashicorp/go-rootcerts"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"

 [[projects]]
   digest = "1:f7b3db9cb74d13f6a7cf84b3801e68585745eacaf7d40cc10ecc4734c30503d3"
   name = "github.com/hashicorp/go-version"
@@ -1012,13 +912,6 @@
   pruneopts = "NUT"
   revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
-
-[[projects]]
-  digest = "1:c6552ffc71f7586a9ea624c6b2bdab87a47d9bd52889e3baf5fbf200425a85e2"
-  name = "github.com/hashicorp/serf"
-  packages = ["coordinate"]
-  pruneopts = "NUT"
-  revision = "19f2c401e122352c047a84d6584dd51e2fb8fcc4"

 [[projects]]
   digest = "1:45e66b20393507035c6a7d15bef5ffe8faf5b083621c1284d9824cc052776de5"
   name = "github.com/huandu/xstrings"
@@ -1192,20 +1085,6 @@
   pruneopts = "NUT"
   revision = "c1c17f74874f2a5ea48bfb06b5459d4ef2689749"
-
-[[projects]]
-  digest = "1:4953945f4fdc12cb7aa0263710534fb64b35a85e4047570fdf1cb03284055f0d"
-  name = "github.com/mattn/go-colorable"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "5411d3eea5978e6cdc258b30de592b60df6aba96"
-
-[[projects]]
-  digest = "1:89e4861dccb76fd84b7de2d88791cc8d23e125805397db27195b4dd83c459713"
-  name = "github.com/mattn/go-isatty"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "57fdcb988a5c543893cc61bce354a6e24ab70022"

 [[projects]]
   branch = "master"
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
@@ -1214,37 +1093,6 @@
   pruneopts = "NUT"
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
-
-[[projects]]
-  digest = "1:337c4007f8ecd8092927c3f9d3c7e391a9e7222aa579dd07a6008379d92d61f6"
-  name = "github.com/mesos/mesos-go"
-  packages = [
-    "detector",
-    "detector/zoo",
-    "mesosproto",
-    "mesosutil",
-    "upid",
-  ]
-  pruneopts = "NUT"
-  revision = "068d5470506e3780189fe607af40892814197c5e"
-
-[[projects]]
-  branch = "master"
-  digest = "1:940a93ac88a908ef0908939181ab048c4172ba7babd32d896ac8b1ef5c1f9036"
-  name = "github.com/mesosphere/mesos-dns"
-  packages = [
-    "detect",
-    "errorutil",
-    "logging",
-    "models",
-    "records",
-    "records/labels",
-    "records/state",
-    "util",
-  ]
-  pruneopts = "NUT"
-  revision = "b47dc4c19f215e98da687b15b4c64e70f629bea5"
-  source = "https://github.com/containous/mesos-dns.git"

 [[projects]]
   digest = "1:b83995756f9b1a24c518d40052d80f524f0a9024ee0479d8a8e91ec2548074d1"
   name = "github.com/miekg/dns"
@@ -1540,22 +1388,6 @@
   pruneopts = "NUT"
   revision = "a1dba9ce8baed984a2495b658c82687f8157b98f"
-
-[[projects]]
-  digest = "1:f140e414b284a500b812b26bda8b9a5110aa110f0473ec97e79f3245000c730a"
-  name = "github.com/rancher/go-rancher"
-  packages = ["v2"]
-  pruneopts = "NUT"
-  revision = "52e2f489534007ae843065468c5a1920d542afa4"
-
-[[projects]]
-  branch = "containous-fork"
-  digest = "1:4e63da6276e18c5e12eadef1ec3cabd437698bef5e60ffdce7a75b492cd1c14e"
-  name = "github.com/rancher/go-rancher-metadata"
-  packages = ["metadata"]
-  pruneopts = "NUT"
-  revision = "e937e8308985dfd3bc157cc8a284454f0cbf4fef"
-  source = "github.com/containous/go-rancher-metadata"

 [[projects]]
   digest = "1:0d095d4b1220902aec6896e23808fbcdfa5192dab96d5a31a443a8c47eabc326"
   name = "github.com/rcrowley/go-metrics"
@@ -1584,13 +1416,6 @@
   pruneopts = "NUT"
   revision = "306ea89b6ef19334614f7b0fc5aa19595022bb8c"
-
-[[projects]]
-  digest = "1:142520cf3c9bb85449dd0000f820b8c604531587ee654793c54909be7dabadac"
-  name = "github.com/samuel/go-zookeeper"
-  packages = ["zk"]
-  pruneopts = "NUT"
-  revision = "1d7be4effb13d2d908342d349d71a284a7542693"

 [[projects]]
   digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca"
   name = "github.com/satori/go.uuid"
@@ -1681,13 +1506,6 @@
   pruneopts = "NUT"
   revision = "1dc93a7db3567a5ccf865106afac88278ba940cf"
-
-[[projects]]
-  digest = "1:9b2996458a2f7d1f3e0ebf08152acfe8c1106f3fe855d08121c5ee7d801a063f"
-  name = "github.com/tv42/zbase32"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "03389da7e0bf9844767f82690f4d68fc097a1306"

 [[projects]]
   digest = "1:7d3a890e525da3b7014d26dd1d4a0e4d31a479995007cd11989ad31db132e66c"
   name = "github.com/uber/jaeger-client-go"
@@ -1738,13 +1556,6 @@
   pruneopts = "NUT"
   revision = "a1cf62cc2159fff407728f118c41aece76c397fa"
-
-[[projects]]
-  digest = "1:e84e99d5f369afaa9a5c41f55b57fa03047ecd3bac2a65861607882693ceea81"
-  name = "github.com/urfave/negroni"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "490e6a555d47ca891a89a150d0c1ef3922dfffe9"

 [[projects]]
   digest = "1:a68c3f55d44d225da4f22ffbed2d8572d267cb19aaa1d60537769034ac66bc01"
   name = "github.com/vdemeester/shakers"
@@ -1956,7 +1767,7 @@
   revision = "09f6ed296fc66555a25fe4ce95173148778dfa85"

 [[projects]]
-  digest = "1:a893d24a604ae6f45f2d6e00ae9e817476e110e2dd5455152b35bb720ca2f626"
+  digest = "1:a840929a3a2d91282dc853cbd5f586069c14ae373247fb7d4cb4fa02b285326e"
   name = "google.golang.org/grpc"
   packages = [
     ".",
@@ -1971,7 +1782,6 @@
     "encoding/proto",
     "grpclb/grpc_lb_v1/messages",
     "grpclog",
-    "health/grpc_health_v1",
     "internal",
     "keepalive",
     "metadata",
@@ -2348,29 +2158,12 @@
   analyzer-name = "dep"
   analyzer-version = 1
   input-imports = [
-    "github.com/ArthurHlt/go-eureka-client/eureka",
     "github.com/BurntSushi/toml",
-    "github.com/BurntSushi/ty/fun",
     "github.com/Masterminds/sprig",
     "github.com/NYTimes/gziphandler",
     "github.com/abbot/go-http-auth",
-    "github.com/abronan/valkeyrie",
     "github.com/abronan/valkeyrie/store",
-    "github.com/abronan/valkeyrie/store/boltdb",
-    "github.com/abronan/valkeyrie/store/consul",
-    "github.com/abronan/valkeyrie/store/etcd/v3",
-    "github.com/abronan/valkeyrie/store/zookeeper",
     "github.com/armon/go-proxyproto",
-    "github.com/aws/aws-sdk-go/aws",
-    "github.com/aws/aws-sdk-go/aws/credentials",
-    "github.com/aws/aws-sdk-go/aws/defaults",
-    "github.com/aws/aws-sdk-go/aws/ec2metadata",
-    "github.com/aws/aws-sdk-go/aws/session",
-    "github.com/aws/aws-sdk-go/service/dynamodb",
-    "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute",
-    "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface",
-    "github.com/aws/aws-sdk-go/service/ec2",
-    "github.com/aws/aws-sdk-go/service/ecs",
     "github.com/cenkalti/backoff",
     "github.com/containous/alice",
     "github.com/containous/flaeg",
@@ -2390,7 +2183,6 @@
     "github.com/docker/docker/pkg/namesgenerator",
     "github.com/docker/go-connections/nat",
     "github.com/docker/go-connections/sockets",
-    "github.com/docker/leadership",
     "github.com/eapache/channels",
     "github.com/elazarl/go-bindata-assetfs",
     "github.com/gambol99/go-marathon",
@@ -2414,25 +2206,15 @@
     "github.com/golang/protobuf/proto",
     "github.com/google/go-github/github",
     "github.com/gorilla/websocket",
-    "github.com/hashicorp/consul/api",
     "github.com/hashicorp/go-version",
     "github.com/influxdata/influxdb/client/v2",
     "github.com/instana/go-sensor",
     "github.com/libkermit/compose/check",
     "github.com/libkermit/docker",
     "github.com/libkermit/docker-check",
-    "github.com/mesos/mesos-go/detector",
-    "github.com/mesos/mesos-go/detector/zoo",
-    "github.com/mesos/mesos-go/upid",
-    "github.com/mesosphere/mesos-dns/detect",
-    "github.com/mesosphere/mesos-dns/logging",
-    "github.com/mesosphere/mesos-dns/records",
-    "github.com/mesosphere/mesos-dns/records/state",
-    "github.com/mesosphere/mesos-dns/util",
     "github.com/miekg/dns",
     "github.com/mitchellh/copystructure",
     "github.com/mitchellh/hashstructure",
-    "github.com/mitchellh/mapstructure",
     "github.com/mvdan/xurls",
     "github.com/ogier/pflag",
     "github.com/opentracing/opentracing-go",
@@ -2444,8 +2226,6 @@
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
     "github.com/prometheus/client_model/go",
-    "github.com/rancher/go-rancher-metadata/metadata",
-    "github.com/rancher/go-rancher/v2",
     "github.com/ryanuber/go-glob",
     "github.com/satori/go.uuid",
     "github.com/sirupsen/logrus",
@@ -2460,7 +2240,6 @@
     "github.com/uber/jaeger-lib/metrics",
     "github.com/unrolled/render",
     "github.com/unrolled/secure",
-    "github.com/urfave/negroni",
     "github.com/vdemeester/shakers",
     "github.com/vulcand/oxy/buffer",
     "github.com/vulcand/oxy/cbreaker",

Gopkg.toml

@@ -190,9 +190,9 @@ required = [
   name = "github.com/vulcand/oxy"

 [[constraint]]
-  branch = "master"
+  # branch = "master"
   name = "github.com/go-acme/lego"
-  # version = "2.4.0"
+  version = "2.4.0"

 [[constraint]]
   name = "google.golang.org/grpc"

@@ -130,6 +130,7 @@ For example, `CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email` could be used
 | [DNSimple](https://dnsimple.com) | `dnsimple` | `DNSIMPLE_OAUTH_TOKEN`, `DNSIMPLE_BASE_URL` | YES |
 | [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET`, `DNSMADEEASY_SANDBOX` | Not tested yet |
 | [DNSPod](https://www.dnspod.com/) | `dnspod` | `DNSPOD_API_KEY` | Not tested yet |
+| [Domain Offensive (do.de)](https://www.do.de/) | `dode` | `DODE_TOKEN` | YES |
 | [DreamHost](https://www.dreamhost.com/) | `dreamhost` | `DREAMHOST_API_KEY` | YES |
 | [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | YES |
 | [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` | Not tested yet |
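Since the jump to lego v2.4.0 is what pulls in the new `dode` provider, here is a minimal Go sketch of constructing it directly, assuming lego's usual per-provider `NewDNSProvider` constructor; the surrounding ACME client wiring is omitted:

package main

import (
	"log"

	"github.com/go-acme/lego/providers/dns/dode"
)

func main() {
	// NewDNSProvider reads DODE_TOKEN from the environment,
	// matching the row added to the table above.
	provider, err := dode.NewDNSProvider()
	if err != nil {
		log.Fatalf("dode: %v", err)
	}
	// provider can then be handed to an ACME client for the
	// dns-01 challenge; the Traefik side of that wiring is
	// not shown here.
	_ = provider
}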

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2017 Arthur Halet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,357 +0,0 @@
package eureka
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path"
"time"
"strings"
)
const (
defaultBufferSize = 10
UP = "UP"
DOWN = "DOWN"
STARTING = "STARTING"
)
type Config struct {
CertFile string `json:"certFile"`
KeyFile string `json:"keyFile"`
CaCertFile []string `json:"caCertFiles"`
DialTimeout time.Duration `json:"timeout"`
Consistency string `json:"consistency"`
}
type Client struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
httpClient *http.Client
persistence io.Writer
cURLch chan string
// CheckRetry can be used to control the policy for failed requests
// and modify the cluster if needed.
// The client calls it before sending requests again, and
// stops retrying if CheckRetry returns some error. The cases that
// this function needs to handle include no response and unexpected
// http status code of response.
// If CheckRetry is nil, client will call the default one
// `DefaultCheckRetry`.
// Argument cluster is the eureka.Cluster object that these requests have been made on.
// Argument numReqs is the number of http.Requests that have been made so far.
// Argument lastResp is the http.Responses from the last request.
// Argument err is the reason of the failure.
CheckRetry func(cluster *Cluster, numReqs int,
lastResp http.Response, err error) error
}
// NewClient creates a basic client that is configured to be used
// with the given machine list.
func NewClient(machines []string) *Client {
config := Config{
// default timeout is one second
DialTimeout: time.Second,
}
client := &Client{
Cluster: NewCluster(machines),
Config: config,
}
client.initHTTPClient()
return client
}
// NewTLSClient creates a basic client with TLS configuration
func NewTLSClient(machines []string, cert string, key string, caCerts []string) (*Client, error) {
// overwrite the default machine to use https
if len(machines) == 0 {
machines = []string{"https://127.0.0.1:4001"}
}
config := Config{
// default timeout is one second
DialTimeout: time.Second,
CertFile: cert,
KeyFile: key,
CaCertFile: make([]string, 0),
}
client := &Client{
Cluster: NewCluster(machines),
Config: config,
}
err := client.initHTTPSClient(cert, key)
if err != nil {
return nil, err
}
for _, caCert := range caCerts {
if err := client.AddRootCA(caCert); err != nil {
return nil, err
}
}
return client, nil
}
// NewClientFromFile creates a client from a given file path.
// The given file is expected to use the JSON format.
func NewClientFromFile(fpath string) (*Client, error) {
fi, err := os.Open(fpath)
if err != nil {
return nil, err
}
defer func() {
if err := fi.Close(); err != nil {
panic(err)
}
}()
return NewClientFromReader(fi)
}
// NewClientFromReader creates a Client configured from a given reader.
// The configuration is expected to use the JSON format.
func NewClientFromReader(reader io.Reader) (*Client, error) {
c := new(Client)
b, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
err = json.Unmarshal(b, c)
if err != nil {
return nil, err
}
if c.Config.CertFile == "" {
c.initHTTPClient()
} else {
err = c.initHTTPSClient(c.Config.CertFile, c.Config.KeyFile)
}
if err != nil {
return nil, err
}
for _, caCert := range c.Config.CaCertFile {
if err := c.AddRootCA(caCert); err != nil {
return nil, err
}
}
return c, nil
}
// Override the Client's HTTP Transport object
func (c *Client) SetTransport(tr *http.Transport) {
c.httpClient.Transport = tr
}
// initHTTPClient initializes an HTTP client for the eureka client
func (c *Client) initHTTPClient() {
tr := &http.Transport{
Dial: c.dial,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
c.httpClient = &http.Client{Transport: tr}
}
// initHTTPSClient initializes an HTTPS client for the eureka client
func (c *Client) initHTTPSClient(cert, key string) error {
if cert == "" || key == "" {
return errors.New("Require both cert and key path")
}
tlsCert, err := tls.LoadX509KeyPair(cert, key)
if err != nil {
return err
}
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{tlsCert},
InsecureSkipVerify: true,
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
Dial: c.dial,
}
c.httpClient = &http.Client{Transport: tr}
return nil
}
// Sets the DialTimeout value
func (c *Client) SetDialTimeout(d time.Duration) {
c.Config.DialTimeout = d
}
// AddRootCA adds a root CA cert for the eureka client
func (c *Client) AddRootCA(caCert string) error {
if c.httpClient == nil {
return errors.New("Client has not been initialized yet!")
}
certBytes, err := ioutil.ReadFile(caCert)
if err != nil {
return err
}
tr, ok := c.httpClient.Transport.(*http.Transport)
if !ok {
panic("AddRootCA(): Transport type assert should not fail")
}
if tr.TLSClientConfig.RootCAs == nil {
caCertPool := x509.NewCertPool()
ok = caCertPool.AppendCertsFromPEM(certBytes)
if ok {
tr.TLSClientConfig.RootCAs = caCertPool
}
tr.TLSClientConfig.InsecureSkipVerify = false
} else {
ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
}
if !ok {
err = errors.New("Unable to load caCert")
}
c.Config.CaCertFile = append(c.Config.CaCertFile, caCert)
return err
}
// SetCluster updates cluster information using the given machine list.
func (c *Client) SetCluster(machines []string) bool {
success := c.internalSyncCluster(machines)
return success
}
func (c *Client) GetCluster() []string {
return c.Cluster.Machines
}
// SyncCluster updates the cluster information using the internal machine list.
func (c *Client) SyncCluster() bool {
return c.internalSyncCluster(c.Cluster.Machines)
}
// internalSyncCluster syncs cluster information using the given machine list.
func (c *Client) internalSyncCluster(machines []string) bool {
for _, machine := range machines {
httpPath := c.createHttpPath(machine, "machines")
resp, err := c.httpClient.Get(httpPath)
if err != nil {
// try another machine in the cluster
continue
} else {
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
// try another machine in the cluster
continue
}
// update Machines List
c.Cluster.updateFromStr(string(b))
// update leader
// the first one in the machine list is the leader
c.Cluster.switchLeader(0)
logger.Debug("sync.machines " + strings.Join(c.Cluster.Machines, ", "))
return true
}
}
return false
}
// createHttpPath creates a complete HTTP URL.
// serverName should contain both the host name and a port number, if any.
func (c *Client) createHttpPath(serverName string, _path string) string {
u, err := url.Parse(serverName)
if err != nil {
panic(err)
}
u.Path = path.Join(u.Path, _path)
if u.Scheme == "" {
u.Scheme = "http"
}
return u.String()
}
// dial attempts to open a TCP connection to the provided address, explicitly
// enabling keep-alives with a one-second interval.
func (c *Client) dial(network, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, c.Config.DialTimeout)
if err != nil {
return nil, err
}
tcpConn, ok := conn.(*net.TCPConn)
if !ok {
return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
}
// Keep TCP alive to check whether or not the remote machine is down
if err = tcpConn.SetKeepAlive(true); err != nil {
return nil, err
}
if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
return nil, err
}
return tcpConn, nil
}
// MarshalJSON implements the Marshaller interface
// as defined by the standard JSON package.
func (c *Client) MarshalJSON() ([]byte, error) {
b, err := json.Marshal(struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{
Config: c.Config,
Cluster: c.Cluster,
})
if err != nil {
return nil, err
}
return b, nil
}
// UnmarshalJSON implements the Unmarshaller interface
// as defined by the standard JSON package.
func (c *Client) UnmarshalJSON(b []byte) error {
temp := struct {
Config Config `json:"config"`
Cluster *Cluster `json:"cluster"`
}{}
err := json.Unmarshal(b, &temp)
if err != nil {
return err
}
c.Cluster = temp.Cluster
c.Config = temp.Config
return nil
}
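For readers of the removed client above: the `CheckRetry` hook documented on the `Client` struct could be swapped out as in this sketch; the signature is taken from the source above, while the endpoint address is hypothetical:

package main

import (
	"errors"
	"net/http"

	"github.com/ArthurHlt/go-eureka-client/eureka"
)

func main() {
	client := eureka.NewClient([]string{"http://127.0.0.1:8761/eureka"}) // hypothetical endpoint
	// Replace the default policy (2 * len(cluster.Machines) attempts
	// in DefaultCheckRetry) with a three-attempt cap.
	client.CheckRetry = func(cluster *eureka.Cluster, numReqs int, lastResp http.Response, err error) error {
		if numReqs > 3 {
			return errors.New("eureka: giving up after 3 attempts")
		}
		return nil
	}
}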

@@ -1,51 +0,0 @@
package eureka
import (
"net/url"
"strings"
)
type Cluster struct {
Leader string `json:"leader"`
Machines []string `json:"machines"`
}
func NewCluster(machines []string) *Cluster {
// if an empty slice was sent in then just assume HTTP 4001 on localhost
if len(machines) == 0 {
machines = []string{"http://127.0.0.1:4001"}
}
// default leader and machines
return &Cluster{
Leader: machines[0],
Machines: machines,
}
}
// switchLeader switches the current leader to machines[num]
func (cl *Cluster) switchLeader(num int) {
logger.Debug("switch.leader[from %v to %v]",
cl.Leader, cl.Machines[num])
cl.Leader = cl.Machines[num]
}
func (cl *Cluster) updateFromStr(machines string) {
cl.Machines = strings.Split(machines, ", ")
}
func (cl *Cluster) updateLeader(leader string) {
logger.Debug("update.leader[%s,%s]", cl.Leader, leader)
cl.Leader = leader
}
func (cl *Cluster) updateLeaderFromURL(u *url.URL) {
var leader string
if u.Scheme == "" {
leader = "http://" + u.Host
} else {
leader = u.Scheme + "://" + u.Host
}
cl.updateLeader(leader)
}

@@ -1,21 +0,0 @@
package eureka
import (
"github.com/ArthurHlt/gominlog"
"log"
)
var logger *gominlog.MinLog
func GetLogger() *log.Logger {
return logger.GetLogger()
}
func SetLogger(loggerLog *log.Logger) {
logger.SetLogger(loggerLog)
}
func init() {
// Default logger uses the go default log.
logger = gominlog.NewClassicMinLogWithPackageName("go-eureka-client")
}

@@ -1,10 +0,0 @@
package eureka
import "strings"
func (c *Client) UnregisterInstance(appId, instanceId string) error {
values := []string{"apps", appId, instanceId}
path := strings.Join(values, "/")
_, err := c.Delete(path)
return err
}

@@ -1,48 +0,0 @@
package eureka
import (
"encoding/json"
"fmt"
)
const (
ErrCodeEurekaNotReachable = 501
)
var (
errorMap = map[int]string{
ErrCodeEurekaNotReachable: "All the given peers are not reachable",
}
)
type EurekaError struct {
ErrorCode int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause,omitempty"`
Index uint64 `json:"index"`
}
func (e EurekaError) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
}
func newError(errorCode int, cause string, index uint64) *EurekaError {
return &EurekaError{
ErrorCode: errorCode,
Message: errorMap[errorCode],
Cause: cause,
Index: index,
}
}
func handleError(b []byte) error {
eurekaErr := new(EurekaError)
err := json.Unmarshal(b, eurekaErr)
if err != nil {
logger.Warning("cannot unmarshal eureka error: %v", err)
return err
}
return eurekaErr
}

@@ -1,64 +0,0 @@
package eureka
import (
"encoding/xml"
"strings"
)
func (c *Client) GetApplications() (*Applications, error) {
response, err := c.Get("apps")
if err != nil {
return nil, err
}
var applications *Applications = new(Applications)
err = xml.Unmarshal(response.Body, applications)
return applications, err
}
func (c *Client) GetApplication(appId string) (*Application, error) {
values := []string{"apps", appId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var application *Application = new(Application)
err = xml.Unmarshal(response.Body, application)
return application, err
}
func (c *Client) GetInstance(appId, instanceId string) (*InstanceInfo, error) {
values := []string{"apps", appId, instanceId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var instance *InstanceInfo = new(InstanceInfo)
err = xml.Unmarshal(response.Body, instance)
return instance, err
}
func (c *Client) GetVIP(vipId string) (*Applications, error) {
values := []string{"vips", vipId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var applications *Applications = new(Applications)
err = xml.Unmarshal(response.Body, applications)
return applications, err
}
func (c *Client) GetSVIP(svipId string) (*Applications, error) {
values := []string{"svips", svipId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var applications *Applications = new(Applications)
err = xml.Unmarshal(response.Body, applications)
return applications, err
}

@@ -1,95 +0,0 @@
package eureka
import (
"encoding/xml"
"encoding/json"
"regexp"
)
type MetaData struct {
Map map[string]string
Class string
}
type Vraw struct {
Content []byte `xml:",innerxml"`
Class string `xml:"class,attr" json:"@class"`
}
func (s *MetaData) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
var attributes []xml.Attr = make([]xml.Attr, 0)
if s.Class != "" {
attributes = append(attributes, xml.Attr{
Name: xml.Name{
Local: "class",
},
Value: s.Class,
})
}
start.Attr = attributes
tokens := []xml.Token{start}
for key, value := range s.Map {
t := xml.StartElement{Name: xml.Name{"", key}}
tokens = append(tokens, t, xml.CharData(value), xml.EndElement{t.Name})
}
tokens = append(tokens, xml.EndElement{
Name: start.Name,
})
for _, t := range tokens {
err := e.EncodeToken(t)
if err != nil {
return err
}
}
// flush to ensure tokens are written
err := e.Flush()
if err != nil {
return err
}
return nil
}
func (s *MetaData) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
s.Map = make(map[string]string)
vraw := &Vraw{}
d.DecodeElement(vraw, &start)
dataInString := string(vraw.Content)
regex, err := regexp.Compile("\\s*<([^<>]+)>([^<>]+)</[^<>]+>\\s*")
if err != nil {
return err
}
subMatches := regex.FindAllStringSubmatch(dataInString, -1)
for _, subMatch := range subMatches {
s.Map[subMatch[1]] = subMatch[2]
}
s.Class = vraw.Class
return nil
}
func (s *MetaData) MarshalJSON() ([]byte, error) {
mapIt := make(map[string]string)
for key, value := range s.Map {
mapIt[key] = value
}
if s.Class != "" {
mapIt["@class"] = s.Class
}
return json.Marshal(mapIt)
}
func (s *MetaData) UnmarshalJSON(data []byte) error {
dataUnmarshal := make(map[string]string)
err := json.Unmarshal(data, &dataUnmarshal) // encoding/json requires a pointer destination
s.Map = dataUnmarshal
if val, ok := s.Map["@class"]; ok {
s.Class = val
delete(s.Map, "@class")
}
return err
}
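A small sketch of the round-trip these custom marshallers implement; the expected output is inferred from the `MarshalXML` code above, not from upstream documentation:

package main

import (
	"encoding/xml"
	"fmt"
	"log"

	"github.com/ArthurHlt/go-eureka-client/eureka"
)

func main() {
	m := &eureka.MetaData{Map: map[string]string{"zone": "eu-west-1"}}
	out, err := xml.Marshal(m)
	if err != nil {
		log.Fatal(err)
	}
	// Each Map entry becomes a child element, so this should print
	// something like <MetaData><zone>eu-west-1</zone></MetaData>.
	fmt.Println(string(out))
}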

@@ -1,21 +0,0 @@
package eureka
import (
"encoding/json"
"strings"
)
func (c *Client) RegisterInstance(appId string, instanceInfo *InstanceInfo) error {
values := []string{"apps", appId}
path := strings.Join(values, "/")
instance := &Instance{
Instance: instanceInfo,
}
body, err := json.Marshal(instance)
if err != nil {
return err
}
_, err = c.Post(path, body)
return err
}

@@ -1,10 +0,0 @@
package eureka
import "strings"
func (c *Client) SendHeartbeat(appId, instanceId string) error {
values := []string{"apps", appId, instanceId}
path := strings.Join(values, "/")
_, err := c.Put(path, nil)
return err
}

@@ -1,439 +0,0 @@
package eureka
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"sync"
"time"
"strconv"
)
// Errors introduced by handling requests
var (
ErrRequestCancelled = errors.New("sending request is cancelled")
)
type RawRequest struct {
method string
relativePath string
body []byte
cancel <-chan bool
}
type Applications struct {
VersionsDelta int `xml:"versions__delta"`
AppsHashcode string `xml:"apps__hashcode"`
Applications []Application `xml:"application,omitempty"`
}
type Application struct {
Name string `xml:"name"`
Instances []InstanceInfo `xml:"instance"`
}
type Instance struct {
Instance *InstanceInfo `xml:"instance" json:"instance"`
}
type Port struct {
Port int `xml:",chardata" json:"$"`
Enabled bool `xml:"enabled,attr" json:"@enabled"`
}
type InstanceInfo struct {
HostName string `xml:"hostName" json:"hostName"`
HomePageUrl string `xml:"homePageUrl,omitempty" json:"homePageUrl,omitempty"`
StatusPageUrl string `xml:"statusPageUrl" json:"statusPageUrl"`
HealthCheckUrl string `xml:"healthCheckUrl,omitempty" json:"healthCheckUrl,omitempty"`
App string `xml:"app" json:"app"`
IpAddr string `xml:"ipAddr" json:"ipAddr"`
VipAddress string `xml:"vipAddress" json:"vipAddress"`
secureVipAddress string `xml:"secureVipAddress,omitempty" json:"secureVipAddress,omitempty"`
Status string `xml:"status" json:"status"`
Port *Port `xml:"port,omitempty" json:"port,omitempty"`
SecurePort *Port `xml:"securePort,omitempty" json:"securePort,omitempty"`
DataCenterInfo *DataCenterInfo `xml:"dataCenterInfo" json:"dataCenterInfo"`
LeaseInfo *LeaseInfo `xml:"leaseInfo,omitempty" json:"leaseInfo,omitempty"`
Metadata *MetaData `xml:"metadata,omitempty" json:"metadata,omitempty"`
IsCoordinatingDiscoveryServer bool `xml:"isCoordinatingDiscoveryServer,omitempty" json:"isCoordinatingDiscoveryServer,omitempty"`
LastUpdatedTimestamp int `xml:"lastUpdatedTimestamp,omitempty" json:"lastUpdatedTimestamp,omitempty"`
LastDirtyTimestamp int `xml:"lastDirtyTimestamp,omitempty" json:"lastDirtyTimestamp,omitempty"`
ActionType string `xml:"actionType,omitempty" json:"actionType,omitempty"`
Overriddenstatus string `xml:"overriddenstatus,omitempty" json:"overriddenstatus,omitempty"`
CountryId int `xml:"countryId,omitempty" json:"countryId,omitempty"`
}
type DataCenterInfo struct {
Name string `xml:"name" json:"name"`
Class string `xml:"class,attr" json:"@class"`
Metadata *DataCenterMetadata `xml:"metadata,omitempty" json:"metadata,omitempty"`
}
type DataCenterMetadata struct {
AmiLaunchIndex string `xml:"ami-launch-index,omitempty" json:"ami-launch-index,omitempty"`
LocalHostname string `xml:"local-hostname,omitempty" json:"local-hostname,omitempty"`
AvailabilityZone string `xml:"availability-zone,omitempty" json:"availability-zone,omitempty"`
InstanceId string `xml:"instance-id,omitempty" json:"instance-id,omitempty"`
PublicIpv4 string `xml:"public-ipv4,omitempty" json:"public-ipv4,omitempty"`
PublicHostname string `xml:"public-hostname,omitempty" json:"public-hostname,omitempty"`
AmiManifestPath string `xml:"ami-manifest-path,omitempty" json:"ami-manifest-path,omitempty"`
LocalIpv4 string `xml:"local-ipv4,omitempty" json:"local-ipv4,omitempty"`
Hostname string `xml:"hostname,omitempty" json:"hostname,omitempty"`
AmiId string `xml:"ami-id,omitempty" json:"ami-id,omitempty"`
InstanceType string `xml:"instance-type,omitempty" json:"instance-type,omitempty"`
}
type LeaseInfo struct {
EvictionDurationInSecs uint `xml:"evictionDurationInSecs,omitempty" json:"evictionDurationInSecs,omitempty"`
RenewalIntervalInSecs int `xml:"renewalIntervalInSecs,omitempty" json:"renewalIntervalInSecs,omitempty"`
DurationInSecs int `xml:"durationInSecs,omitempty" json:"durationInSecs,omitempty"`
RegistrationTimestamp int `xml:"registrationTimestamp,omitempty" json:"registrationTimestamp,omitempty"`
LastRenewalTimestamp int `xml:"lastRenewalTimestamp,omitempty" json:"lastRenewalTimestamp,omitempty"`
EvictionTimestamp int `xml:"evictionTimestamp,omitempty" json:"evictionTimestamp,omitempty"`
ServiceUpTimestamp int `xml:"serviceUpTimestamp,omitempty" json:"serviceUpTimestamp,omitempty"`
}
func NewRawRequest(method, relativePath string, body []byte, cancel <-chan bool) *RawRequest {
return &RawRequest{
method: method,
relativePath: relativePath,
body: body,
cancel: cancel,
}
}
func NewInstanceInfo(hostName, app, ip string, port int, ttl uint, isSsl bool) *InstanceInfo {
dataCenterInfo := &DataCenterInfo{
Name: "MyOwn",
Class: "com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo",
Metadata: nil,
}
leaseInfo := &LeaseInfo{
EvictionDurationInSecs: ttl,
}
instanceInfo := &InstanceInfo{
HostName: hostName,
App: app,
IpAddr: ip,
Status: UP,
DataCenterInfo: dataCenterInfo,
LeaseInfo: leaseInfo,
Metadata: nil,
}
stringPort := ""
if (port != 80 && port != 443) {
stringPort = ":" + strconv.Itoa(port)
}
var protocol string = "http"
if (isSsl) {
protocol = "https"
instanceInfo.secureVipAddress = protocol + "://" + hostName + stringPort
instanceInfo.SecurePort = &Port{
Port: port,
Enabled: true,
}
}else {
instanceInfo.VipAddress = protocol + "://" + hostName + stringPort
instanceInfo.Port = &Port{
Port: port,
Enabled: true,
}
}
instanceInfo.StatusPageUrl = protocol + "://" + hostName + stringPort + "/info"
return instanceInfo
}
// getCancelable issues a cancelable GET request
func (c *Client) getCancelable(endpoint string,
cancel <-chan bool) (*RawResponse, error) {
logger.Debug("get %s [%s]", endpoint, c.Cluster.Leader)
p := endpoint
req := NewRawRequest("GET", p, nil, cancel)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// get issues a GET request
func (c *Client) Get(endpoint string) (*RawResponse, error) {
return c.getCancelable(endpoint, nil)
}
// put issues a PUT request
func (c *Client) Put(endpoint string, body []byte) (*RawResponse, error) {
logger.Debug("put %s, %s, [%s]", endpoint, body, c.Cluster.Leader)
p := endpoint
req := NewRawRequest("PUT", p, body, nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// post issues a POST request
func (c *Client) Post(endpoint string, body []byte) (*RawResponse, error) {
logger.Debug("post %s, %s, [%s]", endpoint, body, c.Cluster.Leader)
p := endpoint
req := NewRawRequest("POST", p, body, nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
// delete issues a DELETE request
func (c *Client) Delete(endpoint string) (*RawResponse, error) {
logger.Debug("delete %s [%s]", endpoint, c.Cluster.Leader)
p := endpoint
req := NewRawRequest("DELETE", p, nil, nil)
resp, err := c.SendRequest(req)
if err != nil {
return nil, err
}
return resp, nil
}
func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
var req *http.Request
var resp *http.Response
var httpPath string
var err error
var respBody []byte
var numReqs = 1
checkRetry := c.CheckRetry
if checkRetry == nil {
checkRetry = DefaultCheckRetry
}
cancelled := make(chan bool, 1)
reqLock := new(sync.Mutex)
if rr.cancel != nil {
cancelRoutine := make(chan bool)
defer close(cancelRoutine)
go func() {
select {
case <-rr.cancel:
cancelled <- true
logger.Debug("send.request is cancelled")
case <-cancelRoutine:
return
}
// Repeat canceling request until this thread is stopped
// because we have no idea about whether it succeeds.
for {
reqLock.Lock()
c.httpClient.Transport.(*http.Transport).CancelRequest(req)
reqLock.Unlock()
select {
case <-time.After(100 * time.Millisecond):
case <-cancelRoutine:
return
}
}
}()
}
// If we connect to a follower and consistency is required, retry until
// we connect to a leader
sleep := 25 * time.Millisecond
maxSleep := time.Second
for attempt := 0;; attempt++ {
if attempt > 0 {
select {
case <-cancelled:
return nil, ErrRequestCancelled
case <-time.After(sleep):
sleep = sleep * 2
if sleep > maxSleep {
sleep = maxSleep
}
}
}
logger.Debug("Connecting to eureka: attempt %d for %s", attempt + 1, rr.relativePath)
httpPath = c.getHttpPath(false, rr.relativePath)
logger.Debug("send.request.to %s | method %s", httpPath, rr.method)
req, err := func() (*http.Request, error) {
reqLock.Lock()
defer reqLock.Unlock()
if req, err = http.NewRequest(rr.method, httpPath, bytes.NewReader(rr.body)); err != nil {
return nil, err
}
req.Header.Set("Content-Type",
"application/json")
return req, nil
}()
if err != nil {
return nil, err
}
resp, err = c.httpClient.Do(req)
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
// If the request was cancelled, return ErrRequestCancelled directly
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
numReqs++
// network error, change a machine!
if err != nil {
logger.Error("network error: %v", err.Error())
lastResp := http.Response{}
if checkErr := checkRetry(c.Cluster, numReqs, lastResp, err); checkErr != nil {
return nil, checkErr
}
c.Cluster.switchLeader(attempt % len(c.Cluster.Machines))
continue
}
// if there is no error, it should receive response
logger.Debug("recv.response.from "+httpPath)
if validHttpStatusCode[resp.StatusCode] {
// try to read byte code and break the loop
respBody, err = ioutil.ReadAll(resp.Body)
if err == nil {
logger.Debug("recv.success "+ httpPath)
break
}
// ReadAll error may be caused due to cancel request
select {
case <-cancelled:
return nil, ErrRequestCancelled
default:
}
if err == io.ErrUnexpectedEOF {
// underlying connection was closed prematurely, probably by timeout
// TODO: empty body or unexpectedEOF can cause http.Transport to get hosed;
// this allows the client to detect that and take evasive action. Need
// to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed.
respBody = []byte{}
break
}
}
// if resp is TemporaryRedirect, set the new leader and retry
if resp.StatusCode == http.StatusTemporaryRedirect {
u, err := resp.Location()
if err != nil {
logger.Warning("%v", err)
} else {
// Update cluster leader based on redirect location
// because it should point to the leader address
c.Cluster.updateLeaderFromURL(u)
logger.Debug("recv.response.relocate "+ u.String())
}
resp.Body.Close()
continue
}
if checkErr := checkRetry(c.Cluster, numReqs, *resp,
errors.New("Unexpected HTTP status code")); checkErr != nil {
return nil, checkErr
}
resp.Body.Close()
}
r := &RawResponse{
StatusCode: resp.StatusCode,
Body: respBody,
Header: resp.Header,
}
return r, nil
}
// DefaultCheckRetry defines the retrying behaviour for bad HTTP requests
// If we have retried 2 * machine number, stop retrying.
// If status code is InternalServerError, sleep for 200ms.
func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
err error) error {
if numReqs >= 2 * len(cluster.Machines) {
return newError(ErrCodeEurekaNotReachable,
"Tried to connect to each peer twice and failed", 0)
}
code := lastResp.StatusCode
if code == http.StatusInternalServerError {
time.Sleep(time.Millisecond * 200)
}
logger.Warning("bad response status code %d", code)
return nil
}
func (c *Client) getHttpPath(random bool, s ...string) string {
var machine string
if random {
machine = c.Cluster.Machines[rand.Intn(len(c.Cluster.Machines))]
} else {
machine = c.Cluster.Leader
}
fullPath := machine
for _, seg := range s {
fullPath += "/" + seg
}
return fullPath
}
// buildValues builds a url.Values map according to the given value and ttl
func buildValues(value string, ttl uint64) url.Values {
v := url.Values{}
if value != "" {
v.Set("value", value)
}
if ttl > 0 {
v.Set("ttl", fmt.Sprintf("%v", ttl))
}
return v
}
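Putting the removed helpers together (`NewInstanceInfo`, `RegisterInstance`, `SendHeartbeat`), a sketch of the register-then-heartbeat flow; the endpoint, host, and app values are hypothetical, and using the hostname as the instance id is an assumption:

package main

import (
	"log"
	"time"

	"github.com/ArthurHlt/go-eureka-client/eureka"
)

func main() {
	client := eureka.NewClient([]string{"http://127.0.0.1:8761/eureka"}) // hypothetical endpoint
	// 30s eviction TTL, plain HTTP on port 8080.
	info := eureka.NewInstanceInfo("node1.example.com", "my-app", "10.0.0.5", 8080, 30, false)
	if err := client.RegisterInstance("my-app", info); err != nil {
		log.Fatalf("register: %v", err)
	}
	for range time.Tick(10 * time.Second) {
		// Instance id assumed to be the hostname here.
		if err := client.SendHeartbeat("my-app", info.HostName); err != nil {
			log.Printf("heartbeat: %v", err)
		}
	}
}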

@@ -1,21 +0,0 @@
package eureka
import "net/http"
type RawResponse struct {
StatusCode int
Body []byte
Header http.Header
}
var (
validHttpStatusCode = map[int]bool{
http.StatusNoContent: true,
http.StatusCreated: true,
http.StatusOK: true,
http.StatusBadRequest: true,
http.StatusNotFound: true,
http.StatusPreconditionFailed: true,
http.StatusForbidden: true,
}
)

@@ -1,3 +0,0 @@
package eureka
const version = "v2"

@@ -1,23 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Arthur Halet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The Software should rather be used for Good, not Evil.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -1,190 +0,0 @@
package gominlog
import (
"log"
"os"
"fmt"
"runtime"
"github.com/fatih/color"
"regexp"
"strings"
"io"
)
type Level int
const (
Loff = Level(^uint(0) >> 1)
Lsevere = Level(1000)
Lerror = Level(900)
Lwarning = Level(800)
Linfo = Level(700)
Ldebug = Level(600)
Lall = Level(-Loff - 1)
)
type MinLog struct {
log *log.Logger
level Level
packageName string
isColorized bool
}
func NewClassicMinLog() *MinLog {
minLog := &MinLog{}
logWriter := os.Stdout
flags := log.Lshortfile | log.Ldate | log.Ltime
minLog.log = log.New(logWriter, "", flags)
minLog.isColorized = true
minLog.packageName = ""
minLog.level = Lall
return minLog
}
func NewClassicMinLogWithPackageName(packageName string) *MinLog {
minLog := NewClassicMinLog()
minLog.SetPackageName(packageName)
return minLog
}
func NewMinLog(appName string, level Level, withColor bool, flag int) *MinLog {
minLog := &MinLog{}
logWriter := os.Stdout
minLog.log = log.New(logWriter, "", flag)
minLog.isColorized = withColor
minLog.packageName = appName
minLog.level = level
return minLog
}
func NewMinLogWithWriter(appName string, level Level, withColor bool, flag int, logWriter io.Writer) *MinLog {
minLog := &MinLog{}
minLog.log = log.New(logWriter, "", flag)
minLog.isColorized = withColor
minLog.packageName = appName
minLog.level = level
return minLog
}
func NewMinLogWithLogger(packageName string, level Level, withColor bool, logger *log.Logger) *MinLog {
minLog := &MinLog{}
minLog.log = logger
minLog.isColorized = withColor
minLog.packageName = packageName
minLog.level = level
return minLog
}
func (this *MinLog) GetLevel() Level {
return Level(this.level)
}
func (this *MinLog) SetWriter(writer io.Writer) {
this.log.SetOutput(writer)
}
func (this *MinLog) SetLevel(level Level) {
this.level = level
}
func (this *MinLog) SetPackageName(newPackageName string) {
this.packageName = newPackageName
}
func (this *MinLog) GetPackageName() string {
return this.packageName
}
func (this *MinLog) SetLogger(l *log.Logger) {
this.log = l
}
func (this *MinLog) WithColor(isColorized bool) {
this.isColorized = isColorized
}
func (this *MinLog) IsColorized() bool {
return this.isColorized
}
func (this *MinLog) GetLogger() *log.Logger {
return this.log
}
func (this *MinLog) logMessage(typeLog string, colorFg color.Attribute, colorBg color.Attribute, args ...interface{}) {
var text string
msg := ""
flags := this.log.Flags()
if (log.Lshortfile | flags) == flags {
msg += this.trace()
this.log.SetFlags(flags - log.Lshortfile)
}
text, ok := args[0].(string)
if !ok {
panic("First argument should be a string")
}
if len(args) > 1 {
newArgs := args[1:]
msg += typeLog + ": " + fmt.Sprintf(text, newArgs...)
} else {
msg += typeLog + ": " + text
}
this.writeMsgInLogger(msg, colorFg, colorBg)
this.log.SetFlags(flags)
}
func (this *MinLog) writeMsgInLogger(msg string, colorFg color.Attribute, colorBg color.Attribute) {
if this.isColorized && int(colorBg) == 0 {
msg = color.New(colorFg).Sprint(msg)
} else if this.isColorized {
msg = color.New(colorFg, colorBg).Sprint(msg)
}
this.log.Print(msg)
}
func (this *MinLog) Error(args ...interface{}) {
if this.level > Lerror {
return
}
this.logMessage("ERROR", color.FgRed, 0, args...)
}
func (this *MinLog) Severe(args ...interface{}) {
if this.level > Lsevere {
return
}
this.logMessage("SEVERE", color.FgRed, color.BgYellow, args...)
}
func (this *MinLog) Debug(args ...interface{}) {
if this.level > Ldebug {
return
}
this.logMessage("DEBUG", color.FgBlue, 0, args...)
}
func (this *MinLog) Info(args ...interface{}) {
if this.level > Linfo {
return
}
this.logMessage("INFO", color.FgCyan, 0, args...)
}
func (this *MinLog) Warning(args ...interface{}) {
if this.level > Lwarning {
return
}
this.logMessage("WARNING", color.FgYellow, 0, args...)
}
func (this *MinLog) trace() string {
var shortFile string
pc := make([]uintptr, 10)
runtime.Callers(2, pc)
f := runtime.FuncForPC(pc[2])
file, line := f.FileLine(pc[2])
if this.packageName == "" {
execFileSplit := strings.Split(os.Args[0], "/")
this.packageName = execFileSplit[len(execFileSplit) - 1]
}
regex, err := regexp.Compile(regexp.QuoteMeta(this.packageName) + "/(.*)")
if err != nil {
panic(err)
}
subMatch := regex.FindStringSubmatch(file)
if len(subMatch) < 2 {
fileSplit := strings.Split(file, "/")
shortFile = fileSplit[len(fileSplit) - 1]
} else {
shortFile = subMatch[1]
}
return fmt.Sprintf("/%s/%s:%d ", this.packageName, shortFile, line)
}
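A short usage sketch of the level-gated, printf-style API above; the package name passed in is arbitrary:

package main

import "github.com/ArthurHlt/gominlog"

func main() {
	l := gominlog.NewClassicMinLogWithPackageName("my-app")
	l.SetLevel(gominlog.Lwarning)
	l.Warning("disk usage at %d%%", 93) // emitted: Lwarning passes the gate
	l.Debug("noisy detail")             // suppressed: Ldebug < Lwarning
}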

@@ -1,13 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.

@@ -1,22 +0,0 @@
/*
Package ty provides utilities for writing type parametric functions with run
time type safety.
This package contains two sub-packages `fun` and `data` which define some
potentially useful functions and abstractions using the type checker in
this package.
Requirements
Go tip (or 1.1 when it's released) is required. This package will not work
with Go 1.0.x or earlier.
The very foundation of this package only recently became possible with the
addition of 3 new functions in the standard library `reflect` package:
SliceOf, MapOf and ChanOf. In particular, it provides the ability to
dynamically construct types at run time from component types.
Further extensions to this package can be made if similar functions are added
for structs and functions(?).
*/
package ty

@@ -1,84 +0,0 @@
package fun
import (
"reflect"
"github.com/BurntSushi/ty"
)
// AsyncChan has a parametric type:
//
// func AsyncChan(chan A) (send chan<- A, recv <-chan A)
//
// AsyncChan provides a channel abstraction without a fixed size buffer.
// The input should be a pointer to a channel that has a type without a
// direction, e.g., `new(chan int)`. Two new channels are returned: `send` and
// `recv`. The caller must send data on the `send` channel and receive data on
// the `recv` channel.
//
// Implementation is inspired by Kyle Lemons' work:
// https://github.com/kylelemons/iq/blob/master/iq_slice.go
func AsyncChan(baseChan interface{}) (send, recv interface{}) {
chk := ty.Check(
new(func(*chan ty.A) (chan ty.A, chan ty.A)),
baseChan)
// We don't care about the baseChan---it is only used to construct
// the return types.
tsend, trecv := chk.Returns[0], chk.Returns[1]
buf := make([]reflect.Value, 0, 10)
rsend := reflect.MakeChan(tsend, 0)
rrecv := reflect.MakeChan(trecv, 0)
go func() {
defer rrecv.Close()
BUFLOOP:
for {
if len(buf) == 0 {
rv, ok := rsend.Recv()
if !ok {
break BUFLOOP
}
buf = append(buf, rv)
}
cases := []reflect.SelectCase{
// case v, ok := <-send
{
Dir: reflect.SelectRecv,
Chan: rsend,
},
// case recv <- buf[0]
{
Dir: reflect.SelectSend,
Chan: rrecv,
Send: buf[0],
},
}
choice, rval, rok := reflect.Select(cases)
switch choice {
case 0:
// case v, ok := <-send
if !rok {
break BUFLOOP
}
buf = append(buf, rval)
case 1:
// case recv <- buf[0]
buf = buf[1:]
default:
panic("bug")
}
}
for _, rv := range buf {
rrecv.Send(rv)
}
}()
// Create the directional channel types.
tsDir := reflect.ChanOf(reflect.SendDir, tsend.Elem())
trDir := reflect.ChanOf(reflect.RecvDir, trecv.Elem())
return rsend.Convert(tsDir).Interface(), rrecv.Convert(trDir).Interface()
}
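
A minimal usage sketch for AsyncChan (a hypothetical standalone program; it assumes this package is imported as github.com/BurntSushi/ty/fun, its path in this vendor tree):

package main

import (
	"fmt"

	"github.com/BurntSushi/ty/fun"
)

func main() {
	// AsyncChan returns interface{} values; assert them to the
	// directional channel types before use.
	s, r := fun.AsyncChan(new(chan int))
	send, recv := s.(chan<- int), r.(<-chan int)
	// Sends are decoupled from receives: values queue in the internal
	// buffer instead of blocking on a fixed-size channel.
	for i := 0; i < 5; i++ {
		send <- i
	}
	close(send)
	// Closing the send side drains the buffer, then closes recv.
	for v := range recv {
		fmt.Println(v)
	}
}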

View file

@ -1,118 +0,0 @@
/*
Package fun provides type parametric utility functions for lists, sets,
channels and maps.
The central contribution of this package is a set of functions that operate
on values without depending on their types while maintaining type safety at
run time using the `reflect` package.
There are two primary concerns when deciding whether to use this package
or not: the loss of compile time type safety and performance. In particular,
with regard to performance, most functions here are much slower than their
built-in counterparts. However, there are a couple where the overhead of
reflection is relatively insignificant: AsyncChan and ParMap.
In terms of code structure and organization, the price is mostly paid inside
of the package due to the annoyances of operating with `reflect`. The caller
usually only has one obligation other than to provide values consistent with
the type of the function: type assert the result to the desired type.
When the caller provides values that are inconsistent with the parametric type
of the function, the function will panic with a `TypeError`. (Either because
the types cannot be unified or because they cannot be constructed due to
limitations of the `reflect` package. See the `github.com/BurntSushi/ty`
package for more details.)
Requirements
Go tip (or 1.1 when it's released) is required. This package will not work
with Go 1.0.x or earlier.
The very foundation of this package only recently became possible with the
addition of 3 new functions in the standard library `reflect` package:
SliceOf, MapOf and ChanOf. In particular, it provides the ability to
dynamically construct types at run time from component types.
Further extensions to this package can be made if similar functions are added
for structs and functions(?).
Examples
Squaring each integer in a slice:
square := func(x int) int { return x * x }
nums := []int{1, 2, 3, 4, 5}
squares := Map(square, nums).([]int)
Reversing any slice:
slice := []string{"a", "b", "c"}
reversed := Reverse(slice).([]string)
Sorting any slice:
// Sort a slice of structs with first class functions.
type Album struct {
Title string
Year int
}
albums := []Album{
{"Born to Run", 1975},
{"WIESS", 1973},
{"Darkness", 1978},
{"Greetings", 1973},
}
less := func(a, b Album) bool { return a.Year < b.Year }
sorted := QuickSort(less, albums).([]Album)
Parallel map:
// Compute the prime factorization concurrently
// for every integer in [1000, 10000].
primeFactors := func(n int) []int { /* compute prime factors */ }
factors := ParMap(primeFactors, Range(1000, 10001)).([][]int)
Asynchronous channel without a fixed size buffer:
s, r := AsyncChan(new(chan int))
send, recv := s.(chan<- int), r.(<-chan int)
// Send as much as you want.
for i := 0; i < 100; i++ {
send <- i
}
close(send)
for i := range recv {
// do something with `i`
}
Shuffle any slice in place:
jumbleMe := []string{"The", "quick", "brown", "fox"}
Shuffle(jumbleMe)
Function memoization:
// Memoizing a recursive function like `fibonacci`.
// Write it like normal:
var fib func(n int64) int64
fib = func(n int64) int64 {
switch n {
case 0:
return 0
case 1:
return 1
}
return fib(n - 1) + fib(n - 2)
}
// And wrap it with `Memo`.
fib = Memo(fib).(func(int64) int64)
// Will keep your CPU busy for a long time
// without memoization.
fmt.Println(fib(80))
*/
package fun

View file

@ -1,35 +0,0 @@
package fun
import (
"reflect"
"github.com/BurntSushi/ty"
)
// Memo has a parametric type:
//
// func Memo(f func(A) B) func(A) B
//
// Memo memoizes any function of a single argument that returns a single value.
// The type `A` must be a Go type for which the comparison operators `==` and
// `!=` are fully defined (this rules out functions, maps and slices).
func Memo(f interface{}) interface{} {
chk := ty.Check(
new(func(func(ty.A) ty.B)),
f)
vf := chk.Args[0]
saved := make(map[interface{}]reflect.Value)
memo := func(in []reflect.Value) []reflect.Value {
val := in[0].Interface()
ret, ok := saved[val]
if ok {
return []reflect.Value{ret}
}
ret = call1(vf, in[0])
saved[val] = ret
return []reflect.Value{ret}
}
return reflect.MakeFunc(vf.Type(), memo).Interface()
}

View file

@ -1,303 +0,0 @@
package fun
import (
"reflect"
"runtime"
"sync"
"github.com/BurntSushi/ty"
)
// All has a parametric type:
//
// func All(p func(A) bool, xs []A) bool
//
// All returns `true` if and only if every element in `xs` satisfies `p`.
func All(f, xs interface{}) bool {
chk := ty.Check(
new(func(func(ty.A) bool, []ty.A) bool),
f, xs)
vf, vxs := chk.Args[0], chk.Args[1]
xsLen := vxs.Len()
for i := 0; i < xsLen; i++ {
if !call1(vf, vxs.Index(i)).Interface().(bool) {
return false
}
}
return true
}
// Exists has a parametric type:
//
// func Exists(p func(A) bool, xs []A) bool
//
// Exists returns `true` if and only if an element in `xs` satisfies `p`.
func Exists(f, xs interface{}) bool {
chk := ty.Check(
new(func(func(ty.A) bool, []ty.A) bool),
f, xs)
vf, vxs := chk.Args[0], chk.Args[1]
xsLen := vxs.Len()
for i := 0; i < xsLen; i++ {
if call1(vf, vxs.Index(i)).Interface().(bool) {
return true
}
}
return false
}
// In has a parametric type:
//
// func In(needle A, haystack []A) bool
//
// In returns `true` if and only if `v` can be found in `xs`. The equality test
// used is Go's standard `==` equality and NOT deep equality.
//
// Note that this requires that `A` be a type that can be meaningfully compared.
func In(needle, haystack interface{}) bool {
chk := ty.Check(
new(func(ty.A, []ty.A) bool),
needle, haystack)
vhaystack := chk.Args[1]
length := vhaystack.Len()
for i := 0; i < length; i++ {
if vhaystack.Index(i).Interface() == needle {
return true
}
}
return false
}
// Map has a parametric type:
//
// func Map(f func(A) B, xs []A) []B
//
// Map returns the list corresponding to the return value of applying
// `f` to each element in `xs`.
func Map(f, xs interface{}) interface{} {
chk := ty.Check(
new(func(func(ty.A) ty.B, []ty.A) []ty.B),
f, xs)
vf, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0]
xsLen := vxs.Len()
vys := reflect.MakeSlice(tys, xsLen, xsLen)
for i := 0; i < xsLen; i++ {
vy := call1(vf, vxs.Index(i))
vys.Index(i).Set(vy)
}
return vys.Interface()
}
// Filter has a parametric type:
//
// func Filter(p func(A) bool, xs []A) []A
//
// Filter returns a new list only containing the elements of `xs` that satisfy
// the predicate `p`.
func Filter(p, xs interface{}) interface{} {
chk := ty.Check(
new(func(func(ty.A) bool, []ty.A) []ty.A),
p, xs)
vp, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0]
xsLen := vxs.Len()
vys := reflect.MakeSlice(tys, 0, xsLen)
for i := 0; i < xsLen; i++ {
vx := vxs.Index(i)
if call1(vp, vx).Bool() {
vys = reflect.Append(vys, vx)
}
}
return vys.Interface()
}
// Foldl has a parametric type:
//
// func Foldl(f func(A, B) B, init B, xs []A) B
//
// Foldl reduces a list of A to a single element B using a left fold with
// an initial value `init`.
func Foldl(f, init, xs interface{}) interface{} {
chk := ty.Check(
new(func(func(ty.A, ty.B) ty.B, ty.B, []ty.A) ty.B),
f, init, xs)
vf, vinit, vxs, tb := chk.Args[0], chk.Args[1], chk.Args[2], chk.Returns[0]
xsLen := vxs.Len()
vb := zeroValue(tb)
vb.Set(vinit)
if xsLen == 0 {
return vb.Interface()
}
vb.Set(call1(vf, vxs.Index(0), vb))
for i := 1; i < xsLen; i++ {
vb.Set(call1(vf, vxs.Index(i), vb))
}
return vb.Interface()
}
// Foldr has a parametric type:
//
// func Foldr(f func(A, B) B, init B, xs []A) B
//
// Foldr reduces a list of A to a single element B using a right fold with
// an initial value `init`.
func Foldr(f, init, xs interface{}) interface{} {
chk := ty.Check(
new(func(func(ty.A, ty.B) ty.B, ty.B, []ty.A) ty.B),
f, init, xs)
vf, vinit, vxs, tb := chk.Args[0], chk.Args[1], chk.Args[2], chk.Returns[0]
xsLen := vxs.Len()
vb := zeroValue(tb)
vb.Set(vinit)
if xsLen == 0 {
return vb.Interface()
}
vb.Set(call1(vf, vxs.Index(xsLen-1), vb))
for i := xsLen - 2; i >= 0; i-- {
vb.Set(call1(vf, vxs.Index(i), vb))
}
return vb.Interface()
}
// Concat has a parametric type:
//
// func Concat(xs [][]A) []A
//
// Concat returns a new flattened list by appending all elements of `xs`.
func Concat(xs interface{}) interface{} {
chk := ty.Check(
new(func([][]ty.A) []ty.A),
xs)
vxs, tflat := chk.Args[0], chk.Returns[0]
xsLen := vxs.Len()
vflat := reflect.MakeSlice(tflat, 0, xsLen*3)
for i := 0; i < xsLen; i++ {
vflat = reflect.AppendSlice(vflat, vxs.Index(i))
}
return vflat.Interface()
}
// Reverse has a parametric type:
//
// func Reverse(xs []A) []A
//
// Reverse returns a new slice that is the reverse of `xs`.
func Reverse(xs interface{}) interface{} {
chk := ty.Check(
new(func([]ty.A) []ty.A),
xs)
vxs, tys := chk.Args[0], chk.Returns[0]
xsLen := vxs.Len()
vys := reflect.MakeSlice(tys, xsLen, xsLen)
for i := 0; i < xsLen; i++ {
vys.Index(i).Set(vxs.Index(xsLen - 1 - i))
}
return vys.Interface()
}
// Copy has a parametric type:
//
// func Copy(xs []A) []A
//
// Copy returns a copy of `xs` using Go's `copy` operation.
func Copy(xs interface{}) interface{} {
chk := ty.Check(
new(func([]ty.A) []ty.A),
xs)
vxs, tys := chk.Args[0], chk.Returns[0]
xsLen := vxs.Len()
vys := reflect.MakeSlice(tys, xsLen, xsLen)
reflect.Copy(vys, vxs)
return vys.Interface()
}
// ParMap has a parametric type:
//
// func ParMap(f func(A) B, xs []A) []B
//
// ParMap is just like Map, except it applies `f` to each element in `xs`
// concurrently using N worker goroutines (where N is the number of CPUs
// available reported by the Go runtime). If you want to control the number
// of goroutines spawned, use `ParMapN`.
//
// It is important that `f` not be a trivial operation, otherwise the overhead
// of executing it concurrently will result in worse performance than using
// a `Map`.
func ParMap(f, xs interface{}) interface{} {
n := runtime.NumCPU()
if n < 1 {
n = 1
}
return ParMapN(f, xs, n)
}
// ParMapN has a parametric type:
//
// func ParMapN(f func(A) B, xs []A, n int) []B
//
// ParMapN is just like Map, except it applies `f` to each element in `xs`
// concurrently using `n` worker goroutines.
//
// It is important that `f` not be a trivial operation, otherwise the overhead
// of executing it concurrently will result in worse performance than using
// a `Map`.
func ParMapN(f, xs interface{}, n int) interface{} {
chk := ty.Check(
new(func(func(ty.A) ty.B, []ty.A) []ty.B),
f, xs)
vf, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0]
xsLen := vxs.Len()
ys := reflect.MakeSlice(tys, xsLen, xsLen)
if n < 1 {
n = 1
}
work := make(chan int, n)
wg := new(sync.WaitGroup)
for i := 0; i < n; i++ {
wg.Add(1)
go func() {
for j := range work {
// Good golly miss molly. Is `reflect.Value.Index`
// safe to access/set from multiple goroutines?
// XXX: If not, we'll need an extra wave of allocation to
// use real slices of `reflect.Value`.
ys.Index(j).Set(call1(vf, vxs.Index(j)))
}
wg.Done()
}()
}
for i := 0; i < xsLen; i++ {
work <- i
}
close(work)
wg.Wait()
return ys.Interface()
}
// Range generates a list of integers corresponding to every integer in
// the half-open interval [x, y).
//
// Range will panic if `end < start`.
func Range(start, end int) []int {
if end < start {
panic("range must have end greater than or equal to start")
}
r := make([]int, end-start)
for i := start; i < end; i++ {
r[i-start] = i
}
return r
}
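
A short usage sketch for the slice helpers above (hypothetical program; note that the Foldl callback receives the element first and the accumulator second, per the func(A, B) B parametric type):

package main

import (
	"fmt"

	"github.com/BurntSushi/ty/fun"
)

func main() {
	nums := fun.Range(1, 6) // [1 2 3 4 5]
	// Every result is an interface{} and must be type-asserted back.
	squares := fun.Map(func(x int) int { return x * x }, nums).([]int)
	evens := fun.Filter(func(x int) bool { return x%2 == 0 }, nums).([]int)
	// Foldl calls f(element, accumulator).
	sum := fun.Foldl(func(x, acc int) int { return acc + x }, 0, nums).(int)
	fmt.Println(squares, evens, sum) // [1 4 9 16 25] [2 4] 15
}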

View file

@ -1,46 +0,0 @@
package fun
import (
"reflect"
"github.com/BurntSushi/ty"
)
// Keys has a parametric type:
//
// func Keys(m map[A]B) []A
//
// Keys returns a list of the keys of `m` in an unspecified order.
func Keys(m interface{}) interface{} {
chk := ty.Check(
new(func(map[ty.A]ty.B) []ty.A),
m)
vm, tkeys := chk.Args[0], chk.Returns[0]
vkeys := reflect.MakeSlice(tkeys, vm.Len(), vm.Len())
for i, vkey := range vm.MapKeys() {
vkeys.Index(i).Set(vkey)
}
return vkeys.Interface()
}
// Values has a parametric type:
//
// func Values(m map[A]B) []B
//
// Values returns a list of the values of `m` in an unspecified order.
func Values(m interface{}) interface{} {
chk := ty.Check(
new(func(map[ty.A]ty.B) []ty.B),
m)
vm, tvals := chk.Args[0], chk.Returns[0]
vvals := reflect.MakeSlice(tvals, vm.Len(), vm.Len())
for i, vkey := range vm.MapKeys() {
vvals.Index(i).Set(vm.MapIndex(vkey))
}
return vvals.Interface()
}
// func MapMerge(m1, m2 interface{}) interface{} {
// }
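
A usage sketch for Keys and Values (hypothetical program; map iteration order is unspecified, so sort the results before relying on their order):

package main

import (
	"fmt"
	"sort"

	"github.com/BurntSushi/ty/fun"
)

func main() {
	ages := map[string]int{"ana": 30, "bob": 25}
	// Both helpers return interface{}; assert to the concrete slices.
	names := fun.Keys(ages).([]string)
	values := fun.Values(ages).([]int)
	sort.Strings(names)
	sort.Ints(values)
	fmt.Println(names, values) // [ana bob] [25 30]
}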

View file

@ -1,94 +0,0 @@
package fun
import (
"math/rand"
"reflect"
"time"
"github.com/BurntSushi/ty"
)
var randNumGen *rand.Rand
func init() {
randNumGen = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// ShuffleGen has a parametric type:
//
// func ShuffleGen(xs []A, rng *rand.Rand)
//
// ShuffleGen shuffles `xs` in place using the given random number
// generator `rng`.
func ShuffleGen(xs interface{}, rng *rand.Rand) {
chk := ty.Check(
new(func([]ty.A, *rand.Rand)),
xs, rng)
vxs := chk.Args[0]
// Implements the Fisher-Yates shuffle: http://goo.gl/Hb9vg
xsLen := vxs.Len()
swapper := swapperOf(vxs.Type().Elem())
for i := xsLen - 1; i >= 1; i-- {
j := rng.Intn(i + 1)
swapper.swap(vxs.Index(i), vxs.Index(j))
}
}
// Shuffle has a parametric type:
//
// func Shuffle(xs []A)
//
// Shuffle shuffles `xs` in place using a default random number
// generator seeded once at program initialization.
func Shuffle(xs interface{}) {
ShuffleGen(xs, randNumGen)
}
// Sample has a parametric type:
//
// func Sample(population []A, n int) []A
//
// Sample returns a random sample of size `n` from a list
// `population` using a default random number generator seeded once at
// program initialization.
// All elements in `population` have an equal chance of being selected.
// If `n` is greater than the size of `population`, then `n` is set to
// the size of the population.
func Sample(population interface{}, n int) interface{} {
return SampleGen(population, n, randNumGen)
}
// SampleGen has a parametric type:
//
// func SampleGen(population []A, n int, rng *rand.Rand) []A
//
// SampleGen returns a random sample of size `n` from a list
// `population` using a given random number generator `rng`.
// All elements in `population` have an equal chance of being selected.
// If `n` is greater than the size of `population`, then `n` is set to
// the size of the population.
func SampleGen(population interface{}, n int, rng *rand.Rand) interface{} {
chk := ty.Check(
new(func([]ty.A, int, *rand.Rand) []ty.A),
population, n, rng)
rpop, tsamp := chk.Args[0], chk.Returns[0]
popLen := rpop.Len()
if n == 0 {
return reflect.MakeSlice(tsamp, 0, 0).Interface()
}
if n > popLen {
n = popLen
}
// TODO(burntsushi): Implement an algorithm that doesn't depend on
// the size of the population.
rsamp := reflect.MakeSlice(tsamp, n, n)
choices := rng.Perm(popLen)
for i := 0; i < n; i++ {
rsamp.Index(i).Set(rpop.Index(choices[i]))
}
return rsamp.Interface()
}
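
A usage sketch for the shuffle and sample helpers (hypothetical program; Shuffle mutates its argument in place, while Sample returns a new slice):

package main

import (
	"fmt"

	"github.com/BurntSushi/ty/fun"
)

func main() {
	words := []string{"the", "quick", "brown", "fox"}
	// In-place Fisher-Yates shuffle using the package-level RNG.
	fun.Shuffle(words)
	// A random sample of 2 elements; n is capped at len(population).
	pair := fun.Sample(words, 2).([]string)
	fmt.Println(words, pair)
}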

View file

@ -1,99 +0,0 @@
package fun
import (
"reflect"
"github.com/BurntSushi/ty"
)
// Set has a parametric type:
//
// func Set(xs []A) map[A]bool
//
// Set creates a set from a list.
func Set(xs interface{}) interface{} {
chk := ty.Check(
new(func([]ty.A) map[ty.A]bool),
xs)
vxs, tset := chk.Args[0], chk.Returns[0]
vtrue := reflect.ValueOf(true)
vset := reflect.MakeMap(tset)
xsLen := vxs.Len()
for i := 0; i < xsLen; i++ {
vset.SetMapIndex(vxs.Index(i), vtrue)
}
return vset.Interface()
}
// Union has a parametric type:
//
// func Union(a map[A]bool, b map[A]bool) map[A]bool
//
// Union returns the union of two sets, where a set is represented as a
// `map[A]bool`. The sets `a` and `b` are not modified.
func Union(a, b interface{}) interface{} {
chk := ty.Check(
new(func(map[ty.A]bool, map[ty.A]bool) map[ty.A]bool),
a, b)
va, vb, tc := chk.Args[0], chk.Args[1], chk.Returns[0]
vtrue := reflect.ValueOf(true)
vc := reflect.MakeMap(tc)
for _, vkey := range va.MapKeys() {
vc.SetMapIndex(vkey, vtrue)
}
for _, vkey := range vb.MapKeys() {
vc.SetMapIndex(vkey, vtrue)
}
return vc.Interface()
}
// Intersection has a parametric type:
//
// func Intersection(a map[A]bool, b map[A]bool) map[A]bool
//
// Intersection returns the intersection of two sets, where a set is
// represented as a `map[A]bool`. The sets `a` and `b` are not modified.
func Intersection(a, b interface{}) interface{} {
chk := ty.Check(
new(func(map[ty.A]bool, map[ty.A]bool) map[ty.A]bool),
a, b)
va, vb, tc := chk.Args[0], chk.Args[1], chk.Returns[0]
vtrue := reflect.ValueOf(true)
vc := reflect.MakeMap(tc)
for _, vkey := range va.MapKeys() {
if vb.MapIndex(vkey).IsValid() {
vc.SetMapIndex(vkey, vtrue)
}
}
for _, vkey := range vb.MapKeys() {
if va.MapIndex(vkey).IsValid() {
vc.SetMapIndex(vkey, vtrue)
}
}
return vc.Interface()
}
// Difference has a parametric type:
//
// func Difference(a map[A]bool, b map[A]bool) map[A]bool
//
// Difference returns a set with all elements in `a` that are not in `b`.
// The sets `a` and `b` are not modified.
func Difference(a, b interface{}) interface{} {
chk := ty.Check(
new(func(map[ty.A]bool, map[ty.A]bool) map[ty.A]bool),
a, b)
va, vb, tc := chk.Args[0], chk.Args[1], chk.Returns[0]
vtrue := reflect.ValueOf(true)
vc := reflect.MakeMap(tc)
for _, vkey := range va.MapKeys() {
if !vb.MapIndex(vkey).IsValid() {
vc.SetMapIndex(vkey, vtrue)
}
}
return vc.Interface()
}
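
A usage sketch for the set helpers (hypothetical program; a set here is a plain map[A]bool, so the inputs can be built with Set):

package main

import (
	"fmt"

	"github.com/BurntSushi/ty/fun"
)

func main() {
	a := fun.Set([]int{1, 2, 3}).(map[int]bool)
	b := fun.Set([]int{3, 4}).(map[int]bool)
	union := fun.Union(a, b).(map[int]bool)         // {1 2 3 4}
	common := fun.Intersection(a, b).(map[int]bool) // {3}
	onlyA := fun.Difference(a, b).(map[int]bool)    // {1 2}
	fmt.Println(len(union), len(common), len(onlyA)) // 4 1 2
}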

View file

@ -1,98 +0,0 @@
package fun
import (
"reflect"
"sort"
"github.com/BurntSushi/ty"
)
// QuickSort has a parametric type:
//
// func QuickSort(less func(x1 A, x2 A) bool, []A) []A
//
// QuickSort applies the "quicksort" algorithm to return a new sorted list
// of `xs`, where `xs` is not modified.
//
// `less` should be a function that returns true if and only if `x1` is less
// than `x2`.
func QuickSort(less, xs interface{}) interface{} {
chk := ty.Check(
new(func(func(ty.A, ty.A) bool, []ty.A) []ty.A),
less, xs)
vless, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0]
var qsort func(left, right int)
var partition func(left, right, pivot int) int
xsind := Range(0, vxs.Len())
qsort = func(left, right int) {
if left >= right {
return
}
pivot := (left + right) / 2
pivot = partition(left, right, pivot)
qsort(left, pivot-1)
qsort(pivot+1, right)
}
partition = func(left, right, pivot int) int {
vpivot := xsind[pivot]
xsind[pivot], xsind[right] = xsind[right], xsind[pivot]
ind := left
for i := left; i < right; i++ {
if call1(vless, vxs.Index(xsind[i]), vxs.Index(vpivot)).Bool() {
xsind[i], xsind[ind] = xsind[ind], xsind[i]
ind++
}
}
xsind[ind], xsind[right] = xsind[right], xsind[ind]
return ind
}
// Sort `xsind` in place.
qsort(0, len(xsind)-1)
vys := reflect.MakeSlice(tys, len(xsind), len(xsind))
for i, xsIndex := range xsind {
vys.Index(i).Set(vxs.Index(xsIndex))
}
return vys.Interface()
}
// Sort has a parametric type:
//
// func Sort(less func(x1 A, x2 A) bool, []A)
//
// Sort uses the standard library `sort` package to sort `xs` in place.
//
// `less` should be a function that returns true if and only if `x1` is less
// than `x2`.
func Sort(less, xs interface{}) {
chk := ty.Check(
new(func(func(ty.A, ty.A) bool, []ty.A)),
less, xs)
vless, vxs := chk.Args[0], chk.Args[1]
sort.Sort(&sortable{vless, vxs, swapperOf(vxs.Type().Elem())})
}
type sortable struct {
less reflect.Value
xs reflect.Value
swapper swapper
}
func (s *sortable) Less(i, j int) bool {
ith, jth := s.xs.Index(i), s.xs.Index(j)
return call1(s.less, ith, jth).Bool()
}
func (s *sortable) Swap(i, j int) {
s.swapper.swap(s.xs.Index(i), s.xs.Index(j))
}
func (s *sortable) Len() int {
return s.xs.Len()
}
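
A usage sketch contrasting the two sort entry points (hypothetical program; QuickSort returns a new sorted slice, Sort reorders in place):

package main

import (
	"fmt"

	"github.com/BurntSushi/ty/fun"
)

func main() {
	xs := []int{3, 1, 2}
	less := func(a, b int) bool { return a < b }
	sorted := fun.QuickSort(less, xs).([]int) // xs is left untouched
	fun.Sort(less, xs)                        // xs itself is now sorted
	fmt.Println(sorted, xs)
}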

View file

@ -1,37 +0,0 @@
package fun
import (
"reflect"
)
// zeroValue returns an addressable zero value of typ.
func zeroValue(typ reflect.Type) reflect.Value {
return reflect.New(typ).Elem()
}
// swapper holds an addressable scratch value used to swap two values of
// the same type with a three-way assignment.
type swapper reflect.Value
func swapperOf(typ reflect.Type) swapper {
return swapper(zeroValue(typ))
}
func (s swapper) swap(a, b reflect.Value) {
vs := reflect.Value(s)
vs.Set(a)
a.Set(b)
b.Set(vs)
}
func call(f reflect.Value, args ...reflect.Value) {
f.Call(args)
}
func call1(f reflect.Value, args ...reflect.Value) reflect.Value {
return f.Call(args)[0]
}
func call2(f reflect.Value, args ...reflect.Value) (
reflect.Value, reflect.Value) {
ret := f.Call(args)
return ret[0], ret[1]
}

View file

@ -1,338 +0,0 @@
package ty
import (
"fmt"
"reflect"
"strings"
)
// TypeError corresponds to any error reported by the `Check` function.
// Since `Check` panics, if you want to run `Check` safely, it is
// appropriate to recover and use a type switch to discover a `TypeError`
// value.
type TypeError string
func (te TypeError) Error() string {
return string(te)
}
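
As the TypeError comment above notes, Check panics rather than returning an error; a caller that wants an error value can recover it, as in this hypothetical wrapper (a sketch that would live in this package, alongside Check):

// safeCheck converts a Check panic back into an ordinary error via
// recover and a type assertion on TypeError.
func safeCheck(f interface{}, as ...interface{}) (typed *Typed, err error) {
	defer func() {
		if r := recover(); r != nil {
			if te, ok := r.(TypeError); ok {
				err = te
				return
			}
			panic(r) // not a TypeError; re-raise
		}
	}()
	return Check(f, as...), nil
}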
// pe builds a TypeError from a format string.
func pe(format string, v ...interface{}) TypeError {
return TypeError(fmt.Sprintf(format, v...))
}
// ppe panics with a TypeError built from a format string.
func ppe(format string, v ...interface{}) {
panic(pe(format, v...))
}
// Typed corresponds to the information returned by `Check`.
type Typed struct {
// In correspondence with the `as` parameter to `Check`.
Args []reflect.Value
// In correspondence with the return types of `f` in `Check`.
Returns []reflect.Type
// The type environment generated via unification in `Check`.
// (Its usefulness in the public API is questionable.)
TypeEnv map[string]reflect.Type
}
// Check accepts a function `f`, which may have a parametric type, along with a
// number of arguments in correspondence with the arguments to `f`,
// and returns inferred Go type information. This type information includes
// a list of `reflect.Value` in correspondence with `as`, a list of
// `reflect.Type` in correspondence with the return types of `f` and a type
// environment mapping type variables to `reflect.Type`.
//
// The power of `Check` comes from the following invariant: if `Check` returns,
// then the types of the arguments corresponding to `as` are consistent
// with the parametric type of `f`, *and* the parametric return types of `f`
// were made into valid Go types that are not parametric. Otherwise, there is
// a bug in `Check`.
//
// More concretely, consider a simple parametric function `Map`, which
// transforms a list of elements by applying a function to each element in
// order to generate a new list. Such a function constructed only for integers
// might have a type like
//
// func Map(func(int) int, []int) []int
//
// But the parametric type of `Map` could be given with
//
// func Map(func(A) B, []A) []B
//
// which in English reads, "Given a function from any type `A` to any type `B`
// and a slice of `A`, `Map` returns a slice of `B`."
//
// To write a parametric function like `Map`, one can pass a pointer
// to a nil function of the desired parametric type to get the reflection
// information:
//
// func Map(f, xs interface{}) interface{} {
// // Given the parametric type and the arguments, Check will
// // return all the reflection information you need to write `Map`.
// uni := ty.Check(
// new(func(func(ty.A) ty.B, []ty.A) []ty.B),
// f, xs)
//
// // `vf` and `vxs` are `reflect.Value`s of `f` and `xs`.
// vf, vxs := uni.Args[0], uni.Args[1]
//
// // `tys` is a `reflect.Type` of `[]ty.B` where `ty.B` is replaced
// // with the return type of the given function `f`.
// tys := uni.Returns[0]
//
// // Given the promise of `Check`, we now know that `vf` has
// // type `func(ty.A) ty.B` and `vxs` has type `[]ty.A`.
// xsLen := vxs.Len()
//
// // Constructs a new slice which will have type `[]ty.B`.
// vys := reflect.MakeSlice(tys, xsLen, xsLen)
//
// // Actually perform the `Map` operation, but in the world of
// // reflection.
// for i := 0; i < xsLen; i++ {
// vy := vf.Call([]reflect.Value{vxs.Index(i)})[0]
// vys.Index(i).Set(vy)
// }
//
// // The `reflect.Value.Interface` method is how we exit the world of
// // reflection. The onus is now on the caller to type assert it to
// // the appropriate type.
// return vys.Interface()
// }
//
// Working in the reflection world is certainly more inconvenient than writing
// regular Go code, but the information and invariants held by `Check` provide
// a more convenient experience than how one normally works with reflection.
// (Notice that there is no error-prone type switching or boilerplate to
// construct new types, since `Check` guarantees the types are consistent
// with the inputs for us.)
//
// And while writing such functions is still not so convenient,
// invoking them is simple:
//
// square := func(x int) int { return x * x }
// squared := Map(square, []int{1, 2, 3, 4, 5}).([]int)
//
// Restrictions
//
// There are a few restrictions imposed on the parametric return types of
// `f`: type variables may only be found in types that can be composed by the
// `reflect` package. This *only* includes channels, maps, pointers and slices.
// If a type variable is found in an array, function or struct, `Check` will
// panic.
//
// Also, type variables inside of structs are ignored in the types of the
// arguments `as`. This restriction may be lifted in the future.
//
// To be clear: type variables *may* appear in arrays or functions in the types
// of the arguments `as`.
func Check(f interface{}, as ...interface{}) *Typed {
rf := reflect.ValueOf(f)
tf := rf.Type()
if tf.Kind() == reflect.Ptr {
rf = reflect.Indirect(rf)
tf = rf.Type()
}
if tf.Kind() != reflect.Func {
ppe("The type of `f` must be a function, but it is a '%s'.", tf.Kind())
}
if tf.NumIn() != len(as) {
ppe("`f` expects %d arguments, but only %d were given.",
tf.NumIn(), len(as))
}
// Populate the argument value list.
args := make([]reflect.Value, len(as))
for i := 0; i < len(as); i++ {
args[i] = reflect.ValueOf(as[i])
}
// Populate our type variable environment through unification.
tyenv := make(tyenv)
for i := 0; i < len(args); i++ {
tp := typePair{tyenv, tf.In(i), args[i].Type()}
// Mutates the type variable environment.
if err := tp.unify(tp.param, tp.input); err != nil {
argTypes := make([]string, len(args))
for i := range args {
argTypes[i] = args[i].Type().String()
}
ppe("\nError type checking\n\t%s\nwith argument types\n\t(%s)\n%s",
tf, strings.Join(argTypes, ", "), err)
}
}
// Now substitute those types into the return types of `f`.
retTypes := make([]reflect.Type, tf.NumOut())
for i := 0; i < tf.NumOut(); i++ {
retTypes[i] = (&returnType{tyenv, tf.Out(i)}).tysubst(tf.Out(i))
}
return &Typed{args, retTypes, map[string]reflect.Type(tyenv)}
}
// tyenv maps type variable names to their inferred Go type.
type tyenv map[string]reflect.Type
// typePair represents a pair of types to be unified. They act as a way to
// report sensible error messages from within the unification algorithm.
//
// It also includes a type environment, which is mutated during unification.
type typePair struct {
tyenv tyenv
param reflect.Type
input reflect.Type
}
func (tp typePair) error(format string, v ...interface{}) error {
return pe("Type error when unifying type '%s' and '%s': %s",
tp.param, tp.input, fmt.Sprintf(format, v...))
}
// unify attempts to satisfy a pair of types, where the `param` type is the
// expected type of a function argument and the `input` type is the known
// type of a function argument. The `param` type may be parametric (that is,
// it may contain a type that is convertible to TypeVariable) but the
// `input` type may *not* be parametric.
//
// Any failure to unify the two types results in a panic.
//
// The end result of unification is a type environment: a set of substitutions
// from type variable to a Go type.
func (tp typePair) unify(param, input reflect.Type) error {
if tyname := tyvarName(input); len(tyname) > 0 {
return tp.error("Type variables are not allowed in the types of " +
"arguments.")
}
if tyname := tyvarName(param); len(tyname) > 0 {
if cur, ok := tp.tyenv[tyname]; ok && cur != input {
return tp.error("Type variable %s expected type '%s' but got '%s'.",
tyname, cur, input)
} else if !ok {
tp.tyenv[tyname] = input
}
return nil
}
if param.Kind() != input.Kind() {
return tp.error("Cannot unify different kinds of types '%s' and '%s'.",
param, input)
}
switch param.Kind() {
case reflect.Array:
return tp.unify(param.Elem(), input.Elem())
case reflect.Chan:
if param.ChanDir() != input.ChanDir() {
return tp.error("Cannot unify '%s' with '%s' "+
"(channel directions are different: '%s' != '%s').",
param, input, param.ChanDir(), input.ChanDir())
}
return tp.unify(param.Elem(), input.Elem())
case reflect.Func:
if param.NumIn() != input.NumIn() || param.NumOut() != input.NumOut() {
return tp.error("Cannot unify '%s' with '%s'.", param, input)
}
for i := 0; i < param.NumIn(); i++ {
if err := tp.unify(param.In(i), input.In(i)); err != nil {
return err
}
}
for i := 0; i < param.NumOut(); i++ {
if err := tp.unify(param.Out(i), input.Out(i)); err != nil {
return err
}
}
case reflect.Map:
if err := tp.unify(param.Key(), input.Key()); err != nil {
return err
}
return tp.unify(param.Elem(), input.Elem())
case reflect.Ptr:
return tp.unify(param.Elem(), input.Elem())
case reflect.Slice:
return tp.unify(param.Elem(), input.Elem())
}
// The only other container types are Interface and Struct.
// I am unsure about what to do with interfaces. Mind is fuzzy.
// Structs? I don't think it really makes much sense to use type
// variables inside of them.
return nil
}
// returnType corresponds to the type of a single return value of a function,
// in which the type may be parametric. It also contains a type environment
// constructed from unification.
type returnType struct {
tyenv tyenv
typ reflect.Type
}
func (rt returnType) panic(format string, v ...interface{}) {
ppe("Error substituting in return type '%s': %s",
rt.typ, fmt.Sprintf(format, v...))
}
// tysubst attempts to substitute all type variables within a single return
// type with their corresponding Go type from the type environment.
//
// tysubst will panic if a type variable is unbound, or if it encounters a
// type that cannot be dynamically created. Such types include arrays,
// functions and structs. (A limitation of the `reflect` package.)
func (rt returnType) tysubst(typ reflect.Type) reflect.Type {
if tyname := tyvarName(typ); len(tyname) > 0 {
if thetype, ok := rt.tyenv[tyname]; !ok {
rt.panic("Unbound type variable %s.", tyname)
} else {
return thetype
}
}
switch typ.Kind() {
case reflect.Array:
rt.panic("Cannot dynamically create Array types.")
case reflect.Chan:
return reflect.ChanOf(typ.ChanDir(), rt.tysubst(typ.Elem()))
case reflect.Func:
rt.panic("Cannot dynamically create Function types.")
case reflect.Interface:
// rt.panic("TODO")
// Not sure if this is right.
return typ
case reflect.Map:
return reflect.MapOf(rt.tysubst(typ.Key()), rt.tysubst(typ.Elem()))
case reflect.Ptr:
return reflect.PtrTo(rt.tysubst(typ.Elem()))
case reflect.Slice:
return reflect.SliceOf(rt.tysubst(typ.Elem()))
case reflect.Struct:
rt.panic("Cannot dynamically create Struct types.")
case reflect.UnsafePointer:
rt.panic("Cannot dynamically create unsafe.Pointer types.")
}
// We've covered all the composite types, so we're only left with
// base types.
return typ
}
func tyvarName(t reflect.Type) string {
if !t.ConvertibleTo(tyvarUnderlyingType) {
return ""
}
return t.Name()
}
// AssertType panics with a `TypeError` if `v` does not have type `t`.
// Otherwise, it returns the `reflect.Value` of `v`.
func AssertType(v interface{}, t reflect.Type) reflect.Value {
rv := reflect.ValueOf(v)
tv := rv.Type()
if tv != t {
ppe("Value '%v' has type '%s' but expected '%s'.", v, tv, t)
}
return rv
}

View file

@ -1,28 +0,0 @@
package ty
import (
"reflect"
)
// TypeVariable is the underlying type of every type variable used in
// parametric types. It should not be used directly. Instead, use
//
// type myOwnTypeVariable TypeVariable
//
// to create your own type variable. For your convenience, this package
// defines some type variables for you. (e.g., `A`, `B`, `C`, ...)
type TypeVariable struct {
noImitation struct{}
}
// tyvarUnderlyingType is used to discover types that are type variables.
// Namely, any type variable must be convertible to `TypeVariable`.
var tyvarUnderlyingType = reflect.TypeOf(TypeVariable{})
type A TypeVariable
type B TypeVariable
type C TypeVariable
type D TypeVariable
type E TypeVariable
type F TypeVariable
type G TypeVariable

View file

@ -1,476 +0,0 @@
package boltdb
import (
"bytes"
"encoding/binary"
"errors"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
"github.com/coreos/bbolt"
)
var (
// ErrMultipleEndpointsUnsupported is thrown when multiple endpoints are specified
// for BoltDB. The endpoint has to be a local file path
ErrMultipleEndpointsUnsupported = errors.New("boltdb supports one endpoint and should be a file path")
// ErrBoltBucketOptionMissing is thrown when the boltBucket config option is missing
ErrBoltBucketOptionMissing = errors.New("boltBucket config option missing")
)
const (
filePerm os.FileMode = 0644
)
// BoltDB type implements the Store interface
type BoltDB struct {
client *bolt.DB
boltBucket []byte
dbIndex uint64
path string
timeout time.Duration
// By default valkeyrie opens and closes the bolt DB connection for every
// get/put operation. This allows multiple apps to use a Bolt DB at the
// same time.
// PersistConnection flag provides an option to override this behavior,
// i.e. open the connection in New and use it till Close is called.
PersistConnection bool
sync.Mutex
}
const (
metadatalen = 8
transientTimeout = time.Duration(10) * time.Second
)
// Register registers boltdb to valkeyrie
func Register() {
valkeyrie.AddStore(store.BOLTDB, New)
}
// New opens a new BoltDB connection to the specified path and bucket
func New(endpoints []string, options *store.Config) (store.Store, error) {
var (
db *bolt.DB
err error
boltOptions *bolt.Options
timeout = transientTimeout
)
if len(endpoints) > 1 {
return nil, ErrMultipleEndpointsUnsupported
}
if (options == nil) || (len(options.Bucket) == 0) {
return nil, ErrBoltBucketOptionMissing
}
dir, _ := filepath.Split(endpoints[0])
if err = os.MkdirAll(dir, 0750); err != nil {
return nil, err
}
if options.PersistConnection {
boltOptions = &bolt.Options{Timeout: options.ConnectionTimeout}
db, err = bolt.Open(endpoints[0], filePerm, boltOptions)
if err != nil {
return nil, err
}
}
if options.ConnectionTimeout != 0 {
timeout = options.ConnectionTimeout
}
b := &BoltDB{
client: db,
path: endpoints[0],
boltBucket: []byte(options.Bucket),
timeout: timeout,
PersistConnection: options.PersistConnection,
}
return b, nil
}
func (b *BoltDB) reset() {
b.path = ""
b.boltBucket = []byte{}
}
func (b *BoltDB) getDBhandle() (*bolt.DB, error) {
var (
db *bolt.DB
err error
)
if !b.PersistConnection {
boltOptions := &bolt.Options{Timeout: b.timeout}
if db, err = bolt.Open(b.path, filePerm, boltOptions); err != nil {
return nil, err
}
b.client = db
}
return b.client, nil
}
func (b *BoltDB) releaseDBhandle() {
if !b.PersistConnection {
b.client.Close()
}
}
// Get the value at "key". BoltDB doesn't provide a built-in last-modified index
// with every kv pair. It is implemented by an atomic counter maintained by
// valkeyrie and prepended to the value passed by the client.
func (b *BoltDB) Get(key string, opts *store.ReadOptions) (*store.KVPair, error) {
var (
val []byte
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return nil, err
}
defer b.releaseDBhandle()
err = db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
v := bucket.Get([]byte(key))
val = make([]byte, len(v))
copy(val, v)
return nil
})
if len(val) == 0 {
return nil, store.ErrKeyNotFound
}
if err != nil {
return nil, err
}
dbIndex := binary.LittleEndian.Uint64(val[:metadatalen])
val = val[metadatalen:]
return &store.KVPair{Key: key, Value: val, LastIndex: dbIndex}, nil
}
// Put the key, value pair. Index number metadata is prepended to the value
func (b *BoltDB) Put(key string, value []byte, opts *store.WriteOptions) error {
var (
dbIndex uint64
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
dbval := make([]byte, metadatalen)
if db, err = b.getDBhandle(); err != nil {
return err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(b.boltBucket)
if err != nil {
return err
}
dbIndex = atomic.AddUint64(&b.dbIndex, 1)
binary.LittleEndian.PutUint64(dbval, dbIndex)
dbval = append(dbval, value...)
err = bucket.Put([]byte(key), dbval)
if err != nil {
return err
}
return nil
})
return err
}
// Delete the value for the given key.
func (b *BoltDB) Delete(key string) error {
var (
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
err := bucket.Delete([]byte(key))
return err
})
return err
}
// Exists checks if the key exists inside the store
func (b *BoltDB) Exists(key string, opts *store.ReadOptions) (bool, error) {
var (
val []byte
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return false, err
}
defer b.releaseDBhandle()
err = db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
val = bucket.Get([]byte(key))
return nil
})
if len(val) == 0 {
return false, err
}
return true, err
}
// List returns the range of keys starting with the passed in prefix
func (b *BoltDB) List(keyPrefix string, opts *store.ReadOptions) ([]*store.KVPair, error) {
var (
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
kv := []*store.KVPair{}
if db, err = b.getDBhandle(); err != nil {
return nil, err
}
defer b.releaseDBhandle()
hasResult := false
err = db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
cursor := bucket.Cursor()
prefix := []byte(keyPrefix)
for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() {
hasResult = true
dbIndex := binary.LittleEndian.Uint64(v[:metadatalen])
v = v[metadatalen:]
val := make([]byte, len(v))
copy(val, v)
if string(key) != keyPrefix {
kv = append(kv, &store.KVPair{
Key: string(key),
Value: val,
LastIndex: dbIndex,
})
}
}
return nil
})
if !hasResult {
return nil, store.ErrKeyNotFound
}
return kv, err
}
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (b *BoltDB) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
var (
val []byte
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
if db, err = b.getDBhandle(); err != nil {
return false, err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
val = bucket.Get([]byte(key))
if val == nil {
return store.ErrKeyNotFound
}
dbIndex := binary.LittleEndian.Uint64(val[:metadatalen])
if dbIndex != previous.LastIndex {
return store.ErrKeyModified
}
err := bucket.Delete([]byte(key))
return err
})
if err != nil {
return false, err
}
return true, err
}
// AtomicPut puts a value at "key" if the key has not been
// modified since the last Put, throws an error if this is the case
func (b *BoltDB) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
var (
val []byte
dbIndex uint64
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
dbval := make([]byte, metadatalen)
if db, err = b.getDBhandle(); err != nil {
return false, nil, err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
var err error
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
if previous != nil {
return store.ErrKeyNotFound
}
bucket, err = tx.CreateBucket(b.boltBucket)
if err != nil {
return err
}
}
// AtomicPut is equivalent to Put if previous is nil and the key
// doesn't exist in the DB.
val = bucket.Get([]byte(key))
if previous == nil && len(val) != 0 {
return store.ErrKeyExists
}
if previous != nil {
if len(val) == 0 {
return store.ErrKeyNotFound
}
dbIndex = binary.LittleEndian.Uint64(val[:metadatalen])
if dbIndex != previous.LastIndex {
return store.ErrKeyModified
}
}
dbIndex = atomic.AddUint64(&b.dbIndex, 1)
binary.LittleEndian.PutUint64(dbval, b.dbIndex)
dbval = append(dbval, value...)
return (bucket.Put([]byte(key), dbval))
})
if err != nil {
return false, nil, err
}
updated := &store.KVPair{
Key: key,
Value: value,
LastIndex: dbIndex,
}
return true, updated, nil
}
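
A hypothetical optimistic-update helper built on the AtomicPut semantics above: re-read the pair and retry whenever another writer moved the index first (a sketch; updateKey and modify are illustrative names, not part of this package):

// updateKey is a hypothetical sketch of the usual CAS retry loop.
func updateKey(kv store.Store, key string, modify func([]byte) []byte) error {
	for {
		previous, err := kv.Get(key, nil)
		if err != nil {
			return err
		}
		ok, _, err := kv.AtomicPut(key, modify(previous.Value), previous, nil)
		if ok {
			return nil
		}
		if err != store.ErrKeyModified {
			return err
		}
		// Lost the race; loop and retry with the fresh index.
	}
}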
// Close the db connection to the BoltDB
func (b *BoltDB) Close() {
b.Lock()
defer b.Unlock()
if !b.PersistConnection {
b.reset()
} else {
b.client.Close()
}
return
}
// DeleteTree deletes a range of keys with a given prefix
func (b *BoltDB) DeleteTree(keyPrefix string) error {
var (
db *bolt.DB
err error
)
b.Lock()
defer b.Unlock()
if db, err = b.getDBhandle(); err != nil {
return err
}
defer b.releaseDBhandle()
err = db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(b.boltBucket)
if bucket == nil {
return store.ErrKeyNotFound
}
cursor := bucket.Cursor()
prefix := []byte(keyPrefix)
for key, _ := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, _ = cursor.Next() {
_ = bucket.Delete([]byte(key))
}
return nil
})
return err
}
// NewLock has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
return nil, store.ErrCallNotSupported
}
// Watch has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
return nil, store.ErrCallNotSupported
}
// WatchTree has to be implemented at the library level since it's not supported by BoltDB
func (b *BoltDB) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
return nil, store.ErrCallNotSupported
}
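
A minimal end-to-end sketch of opening and using this store (hypothetical file path, bucket, and keys; it assumes the usual valkeyrie pattern of calling Register and then valkeyrie.NewStore):

package main

import (
	"log"

	"github.com/abronan/valkeyrie"
	"github.com/abronan/valkeyrie/store"
	"github.com/abronan/valkeyrie/store/boltdb"
)

func main() {
	boltdb.Register()
	// Exactly one endpoint (a local file path) and a bucket are required.
	kv, err := valkeyrie.NewStore(store.BOLTDB, []string{"/tmp/example.db"},
		&store.Config{Bucket: "example"})
	if err != nil {
		log.Fatal(err)
	}
	if err := kv.Put("app/config", []byte("v1"), nil); err != nil {
		log.Fatal(err)
	}
	pair, err := kv.Get("app/config", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s = %s (index %d)", pair.Key, pair.Value, pair.LastIndex)
}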

View file

@ -1,575 +0,0 @@
package consul
import (
"crypto/tls"
"errors"
"net/http"
"strings"
"sync"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
api "github.com/hashicorp/consul/api"
)
const (
// DefaultWatchWaitTime is how long we block for at a
// time to check if the watched key has changed. This
// affects the minimum time it takes to cancel a watch.
DefaultWatchWaitTime = 15 * time.Second
// RenewSessionRetryMax is the number of time we should try
// to renew the session before giving up and throwing an error
RenewSessionRetryMax = 5
// MaxSessionDestroyAttempts is the maximum times we will try
// to explicitly destroy the session attached to a lock after
// the connectivity to the store has been lost
MaxSessionDestroyAttempts = 5
// defaultLockTTL is the default ttl for the consul lock
defaultLockTTL = 20 * time.Second
)
var (
// ErrMultipleEndpointsUnsupported is thrown when there are
// multiple endpoints specified for Consul
ErrMultipleEndpointsUnsupported = errors.New("consul does not support multiple endpoints")
// ErrSessionRenew is thrown when the session can't be
// renewed because the Consul version does not support sessions
ErrSessionRenew = errors.New("cannot set or renew session for ttl, unable to operate on sessions")
)
// Consul is the receiver type for the
// Store interface
type Consul struct {
sync.Mutex
config *api.Config
client *api.Client
}
type consulLock struct {
lock *api.Lock
renewCh chan struct{}
}
// Register registers consul to valkeyrie
func Register() {
valkeyrie.AddStore(store.CONSUL, New)
}
// New creates a new Consul client given a list
// of endpoints and optional tls config
func New(endpoints []string, options *store.Config) (store.Store, error) {
if len(endpoints) > 1 {
return nil, ErrMultipleEndpointsUnsupported
}
s := &Consul{}
// Create Consul client
config := api.DefaultConfig()
s.config = config
config.HttpClient = http.DefaultClient
config.Address = endpoints[0]
// Set options
if options != nil {
if options.TLS != nil {
s.setTLS(options.TLS)
}
if options.ConnectionTimeout != 0 {
s.setTimeout(options.ConnectionTimeout)
}
}
// Creates a new client
client, err := api.NewClient(config)
if err != nil {
return nil, err
}
s.client = client
return s, nil
}
// SetTLS sets Consul TLS options
func (s *Consul) setTLS(tls *tls.Config) {
s.config.HttpClient.Transport = &http.Transport{
TLSClientConfig: tls,
}
s.config.Scheme = "https"
}
// SetTimeout sets the timeout for connecting to Consul
func (s *Consul) setTimeout(time time.Duration) {
s.config.WaitTime = time
}
// Normalize the key for usage in Consul
func (s *Consul) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimPrefix(key, "/")
}
func (s *Consul) renewSession(pair *api.KVPair, ttl time.Duration) error {
// Check if there is any previous session with an active TTL
session, err := s.getActiveSession(pair.Key)
if err != nil {
return err
}
if session == "" {
entry := &api.SessionEntry{
Behavior: api.SessionBehaviorDelete, // Delete the key when the session expires
TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x
LockDelay: 1 * time.Millisecond, // Virtually disable lock delay
}
// Create the key session
session, _, err = s.client.Session().Create(entry, nil)
if err != nil {
return err
}
lockOpts := &api.LockOptions{
Key: pair.Key,
Session: session,
}
// Lock and ignore if lock is held
// It's just a placeholder for the
// ephemeral behavior
lock, _ := s.client.LockOpts(lockOpts)
if lock != nil {
lock.Lock(nil)
}
}
_, _, err = s.client.Session().Renew(session, nil)
return err
}
// getActiveSession checks if the key already has
// a session attached
func (s *Consul) getActiveSession(key string) (string, error) {
pair, _, err := s.client.KV().Get(key, nil)
if err != nil {
return "", err
}
if pair != nil && pair.Session != "" {
return pair.Session, nil
}
return "", nil
}
// Get the value at "key", returns the last modified index
// to use in conjunction with CAS calls
func (s *Consul) Get(key string, opts *store.ReadOptions) (*store.KVPair, error) {
options := &api.QueryOptions{
AllowStale: false,
RequireConsistent: true,
}
// Get options
if opts != nil {
options.RequireConsistent = opts.Consistent
}
pair, meta, err := s.client.KV().Get(s.normalize(key), options)
if err != nil {
return nil, err
}
// If pair is nil then the key does not exist
if pair == nil {
return nil, store.ErrKeyNotFound
}
return &store.KVPair{Key: pair.Key, Value: pair.Value, LastIndex: meta.LastIndex}, nil
}
// Put a value at "key"
func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error {
key = s.normalize(key)
p := &api.KVPair{
Key: key,
Value: value,
Flags: api.LockFlagValue,
}
if opts != nil && opts.TTL > 0 {
// Create or renew a session holding a TTL. Operations on sessions
// are not deterministic: creating or renewing a session can fail
for retry := 1; retry <= RenewSessionRetryMax; retry++ {
err := s.renewSession(p, opts.TTL)
if err == nil {
break
}
if retry == RenewSessionRetryMax {
return ErrSessionRenew
}
}
}
_, err := s.client.KV().Put(p, nil)
return err
}
// Delete a value at "key"
func (s *Consul) Delete(key string) error {
if _, err := s.Get(key, nil); err != nil {
return err
}
_, err := s.client.KV().Delete(s.normalize(key), nil)
return err
}
// Exists checks that the key exists inside the store
func (s *Consul) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// List child nodes of a given directory
func (s *Consul) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
options := &api.QueryOptions{
AllowStale: false,
RequireConsistent: true,
}
if opts != nil {
if !opts.Consistent {
options.AllowStale = true
options.RequireConsistent = false
}
}
pairs, _, err := s.client.KV().List(s.normalize(directory), options)
if err != nil {
return nil, err
}
if len(pairs) == 0 {
return nil, store.ErrKeyNotFound
}
kv := []*store.KVPair{}
for _, pair := range pairs {
if pair.Key == directory {
continue
}
kv = append(kv, &store.KVPair{
Key: pair.Key,
Value: pair.Value,
LastIndex: pair.ModifyIndex,
})
}
return kv, nil
}
// DeleteTree deletes a range of keys under a given directory
func (s *Consul) DeleteTree(directory string) error {
if _, err := s.List(directory, nil); err != nil {
return err
}
_, err := s.client.KV().DeleteTree(s.normalize(directory), nil)
return err
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
kv := s.client.KV()
watchCh := make(chan *store.KVPair)
go func() {
defer close(watchCh)
// Use a wait time in order to check if we should quit
// from time to time.
opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
for {
// Check if we should quit
select {
case <-stopCh:
return
default:
}
// Get the key
pair, meta, err := kv.Get(key, opts)
if err != nil {
return
}
// If LastIndex didn't change then it means `Get` returned
// because of the WaitTime and the key didn't change.
if opts.WaitIndex == meta.LastIndex {
continue
}
opts.WaitIndex = meta.LastIndex
// Return the value to the channel
if pair != nil {
watchCh <- &store.KVPair{
Key: pair.Key,
Value: pair.Value,
LastIndex: pair.ModifyIndex,
}
}
}
}()
return watchCh, nil
}
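
A hypothetical consumer of the Watch contract above: the first value delivered is the current state, and closing stop ends both the goroutine and the returned channel (watchKey and onChange are illustrative names, not part of this package):

// watchKey is a hypothetical sketch: invoke onChange for the current
// value and then for every subsequent update, until stop is closed.
func watchKey(kv store.Store, key string, stop chan struct{}, onChange func(*store.KVPair)) error {
	events, err := kv.Watch(key, stop, nil)
	if err != nil {
		return err
	}
	for pair := range events {
		onChange(pair)
	}
	return nil
}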
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
kv := s.client.KV()
watchCh := make(chan []*store.KVPair)
go func() {
defer close(watchCh)
// Use a wait time in order to check if we should quit
// from time to time.
opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
for {
// Check if we should quit
select {
case <-stopCh:
return
default:
}
// Get all the children
pairs, meta, err := kv.List(directory, opts)
if err != nil {
return
}
// If LastIndex didn't change then it means `Get` returned
// because of the WaitTime and the child keys didn't change.
if opts.WaitIndex == meta.LastIndex {
continue
}
opts.WaitIndex = meta.LastIndex
// Return children KV pairs to the channel
kvpairs := []*store.KVPair{}
for _, pair := range pairs {
if pair.Key == directory {
continue
}
kvpairs = append(kvpairs, &store.KVPair{
Key: pair.Key,
Value: pair.Value,
LastIndex: pair.ModifyIndex,
})
}
watchCh <- kvpairs
}
}()
return watchCh, nil
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
lockOpts := &api.LockOptions{
Key: s.normalize(key),
}
lock := &consulLock{}
ttl := defaultLockTTL
if options != nil {
// Set optional TTL on Lock
if options.TTL != 0 {
ttl = options.TTL
}
// Set optional value on Lock
if options.Value != nil {
lockOpts.Value = options.Value
}
}
entry := &api.SessionEntry{
Behavior: api.SessionBehaviorRelease, // Release the lock when the session expires
TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x
LockDelay: 1 * time.Millisecond, // Virtually disable lock delay
}
// Create the key session
session, _, err := s.client.Session().Create(entry, nil)
if err != nil {
return nil, err
}
// Place the session and renew chan on lock
lockOpts.Session = session
lock.renewCh = options.RenewLock
l, err := s.client.LockOpts(lockOpts)
if err != nil {
return nil, err
}
// Renew the session ttl lock periodically
s.renewLockSession(entry.TTL, session, options.RenewLock)
lock.lock = l
return lock, nil
}
// renewLockSession is used to renew a session Lock, it takes
// a stopRenew chan which is used to explicitly stop the session
// renew process. The renew routine never stops until a signal is
// sent to this channel. If deleting the session fails because the
// connection to the store is lost, it keeps trying to delete the
// session periodically until it can contact the store, this ensures
// that the lock is not maintained indefinitely which ensures liveness
// over safety for the lock when the store becomes unavailable.
func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan struct{}) {
sessionDestroyAttempts := 0
ttl, err := time.ParseDuration(initialTTL)
if err != nil {
return
}
go func() {
for {
select {
case <-time.After(ttl / 2):
entry, _, err := s.client.Session().Renew(id, nil)
if err != nil {
// If an error occurs, continue until the
// session gets destroyed explicitly or
// the session ttl times out
continue
}
if entry == nil {
return
}
// Handle the server updating the TTL
ttl, _ = time.ParseDuration(entry.TTL)
case <-stopRenew:
// Attempt a session destroy
_, err := s.client.Session().Destroy(id, nil)
if err == nil {
return
}
// We cannot destroy the session because the store
// is unavailable, wait for the session renew period.
// Give up after 'MaxSessionDestroyAttempts'.
sessionDestroyAttempts++
if sessionDestroyAttempts >= MaxSessionDestroyAttempts {
return
}
time.Sleep(ttl / 2)
}
}
}()
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *consulLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
return l.lock.Lock(stopChan)
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *consulLock) Unlock() error {
if l.renewCh != nil {
close(l.renewCh)
}
return l.lock.Unlock()
}
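
A hypothetical lock round-trip through the store interface (a sketch relying only on the NewLock/Lock/Unlock contract above; withLock is an illustrative name, and the file's existing time import covers the TTL):

// withLock is a hypothetical sketch: acquire the lock, run fn while
// holding it, then release it. The channel returned by Lock is closed
// if the lock is lost, e.g. when the backing session expires.
func withLock(kv store.Store, key string, fn func(lost <-chan struct{})) error {
	locker, err := kv.NewLock(key, &store.LockOptions{TTL: 20 * time.Second})
	if err != nil {
		return err
	}
	lost, err := locker.Lock(nil)
	if err != nil {
		return err
	}
	defer locker.Unlock()
	fn(lost)
	return nil
}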
// AtomicPut put a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
p := &api.KVPair{Key: s.normalize(key), Value: value, Flags: api.LockFlagValue}
if previous == nil {
// Consul interprets ModifyIndex = 0 as new key.
p.ModifyIndex = 0
} else {
p.ModifyIndex = previous.LastIndex
}
ok, _, err := s.client.KV().CAS(p, nil)
if err != nil {
return false, nil, err
}
if !ok {
if previous == nil {
return false, nil, store.ErrKeyExists
}
return false, nil, store.ErrKeyModified
}
pair, err := s.Get(key, nil)
if err != nil {
return false, nil, err
}
return true, pair, nil
}
// AtomicDelete deletes a value at "key" if the key has not
// been modified in the meantime, throws an error if this is the case
func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex, Flags: api.LockFlagValue}
// Extra Get operation to check on the key
_, err := s.Get(key, nil)
if err != nil && err == store.ErrKeyNotFound {
return false, err
}
if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
return false, err
} else if !work {
return false, store.ErrKeyModified
}
return true, nil
}
// Close closes the client connection
func (s *Consul) Close() {
return
}

View file

@ -1,534 +0,0 @@
package etcdv3
import (
"context"
"crypto/tls"
"strings"
"sync"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
etcd "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
)
const (
defaultLockTTL = 20 * time.Second
etcdDefaultTimeout = 5 * time.Second
lockSuffix = "___lock"
)
// EtcdV3 is the receiver type for the
// Store interface
type EtcdV3 struct {
client *etcd.Client
}
type etcdLock struct {
lock sync.Mutex
store *EtcdV3
mutex *concurrency.Mutex
session *concurrency.Session
mutexKey string // mutexKey is the key to write, appended with the "___lock" suffix
writeKey string // writeKey is the actual key to update protected by the mutexKey
value string
ttl time.Duration
}
// Register registers etcd to valkeyrie
func Register() {
valkeyrie.AddStore(store.ETCDV3, New)
}
// New creates a new Etcd client given a list
// of endpoints and an optional tls config
func New(addrs []string, options *store.Config) (store.Store, error) {
s := &EtcdV3{}
var (
entries []string
err error
)
entries = store.CreateEndpoints(addrs, "http")
cfg := &etcd.Config{
Endpoints: entries,
}
// Set options
if options != nil {
if options.TLS != nil {
setTLS(cfg, options.TLS, addrs)
}
if options.ConnectionTimeout != 0 {
setTimeout(cfg, options.ConnectionTimeout)
}
if options.Username != "" {
setCredentials(cfg, options.Username, options.Password)
}
if options.SyncPeriod != 0 {
cfg.AutoSyncInterval = options.SyncPeriod
}
}
s.client, err = etcd.New(*cfg)
if err != nil {
return nil, err
}
return s, nil
}
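// Editor's sketch (not part of the original file): constructing the store
// directly through New with a dial timeout. The endpoint address is an
// illustrative assumption.
func exampleNewEtcdV3() (store.Store, error) {
return New(
[]string{"127.0.0.1:2379"},
&store.Config{ConnectionTimeout: 3 * time.Second},
)
}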
// setTLS sets the tls configuration given a tls.Config scheme
func setTLS(cfg *etcd.Config, tls *tls.Config, addrs []string) {
entries := store.CreateEndpoints(addrs, "https")
cfg.Endpoints = entries
cfg.TLS = tls
}
// setTimeout sets the timeout used for connecting to the store
func setTimeout(cfg *etcd.Config, time time.Duration) {
cfg.DialTimeout = time
}
// setCredentials sets the username/password credentials for connecting to Etcd
func setCredentials(cfg *etcd.Config, username, password string) {
cfg.Username = username
cfg.Password = password
}
// Normalize the key for usage in Etcd
func (s *EtcdV3) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimPrefix(key, "/")
}
// Get the value at "key", returns the last modified
// index to use in conjunction to Atomic calls
func (s *EtcdV3) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
var result *etcd.GetResponse
if opts != nil && !opts.Consistent {
result, err = s.client.KV.Get(ctx, s.normalize(key), etcd.WithSerializable())
} else {
result, err = s.client.KV.Get(ctx, s.normalize(key))
}
cancel()
if err != nil {
return nil, err
}
if result.Count == 0 {
return nil, store.ErrKeyNotFound
}
kvs := []*store.KVPair{}
for _, pair := range result.Kvs {
kvs = append(kvs, &store.KVPair{
Key: string(pair.Key),
Value: []byte(pair.Value),
LastIndex: uint64(pair.ModRevision),
})
}
return kvs[0], nil
}
// Put a value at "key"
func (s *EtcdV3) Put(key string, value []byte, opts *store.WriteOptions) (err error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
pr := s.client.Txn(ctx)
if opts != nil && opts.TTL > 0 {
lease := etcd.NewLease(s.client)
resp, err := lease.Grant(context.Background(), int64(opts.TTL/time.Second))
if err != nil {
cancel()
return err
}
pr.Then(etcd.OpPut(key, string(value), etcd.WithLease(resp.ID)))
} else {
pr.Then(etcd.OpPut(key, string(value)))
}
_, err = pr.Commit()
cancel()
if err != nil {
return err
}
return nil
}
// Delete a value at "key"
func (s *EtcdV3) Delete(key string) error {
resp, err := s.client.KV.Delete(context.Background(), s.normalize(key))
if err != nil {
return err
}
if resp.Deleted == 0 {
return store.ErrKeyNotFound
}
return nil
}
// Exists checks if the key exists inside the store
func (s *EtcdV3) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *EtcdV3) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
wc := etcd.NewWatcher(s.client)
// respCh is sending back events to the caller
respCh := make(chan *store.KVPair)
// Get the current value
pair, err := s.Get(key, opts)
if err != nil {
return nil, err
}
go func() {
defer wc.Close()
defer close(respCh)
// Push the current value through the channel.
respCh <- pair
watchCh := wc.Watch(context.Background(), s.normalize(key))
for resp := range watchCh {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
for _, ev := range resp.Events {
respCh <- &store.KVPair{
Key: key,
Value: []byte(ev.Kv.Value),
LastIndex: uint64(ev.Kv.ModRevision),
}
}
}
}()
return respCh, nil
}
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current children's values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *EtcdV3) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
wc := etcd.NewWatcher(s.client)
// respCh is sending back events to the caller
respCh := make(chan []*store.KVPair)
// Get the current value
rev, pairs, err := s.list(directory, opts)
if err != nil {
return nil, err
}
go func() {
defer wc.Close()
defer close(respCh)
// Push the current value through the channel.
respCh <- pairs
rev++
watchCh := wc.Watch(context.Background(), s.normalize(directory), etcd.WithPrefix(), etcd.WithRev(rev))
for resp := range watchCh {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
list := make([]*store.KVPair, len(resp.Events))
for i, ev := range resp.Events {
list[i] = &store.KVPair{
Key: string(ev.Kv.Key),
Value: []byte(ev.Kv.Value),
LastIndex: uint64(ev.Kv.ModRevision),
}
}
respCh <- list
}
}()
return respCh, nil
}
// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, and returns an error if it has
func (s *EtcdV3) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
var cmp etcd.Cmp
var testIndex bool
if previous != nil {
// We compare on the last modified index
testIndex = true
cmp = etcd.Compare(etcd.ModRevision(key), "=", int64(previous.LastIndex))
} else {
// Previous key is not given, thus we want the key not to exist
testIndex = false
cmp = etcd.Compare(etcd.CreateRevision(key), "=", 0)
}
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
pr := s.client.Txn(ctx).If(cmp)
// We set the TTL if given
if opts != nil && opts.TTL > 0 {
lease := etcd.NewLease(s.client)
resp, err := lease.Grant(context.Background(), int64(opts.TTL/time.Second))
if err != nil {
cancel()
return false, nil, err
}
pr.Then(etcd.OpPut(key, string(value), etcd.WithLease(resp.ID)))
} else {
pr.Then(etcd.OpPut(key, string(value)))
}
txn, err := pr.Commit()
cancel()
if err != nil {
return false, nil, err
}
if !txn.Succeeded {
if testIndex {
return false, nil, store.ErrKeyModified
}
return false, nil, store.ErrKeyExists
}
updated := &store.KVPair{
Key: key,
Value: value,
LastIndex: uint64(txn.Header.Revision),
}
return true, updated, nil
}
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, and returns
// an error if it has
func (s *EtcdV3) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
// We compare on the last modified index
cmp := etcd.Compare(etcd.ModRevision(key), "=", int64(previous.LastIndex))
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
txn, err := s.client.Txn(ctx).
If(cmp).
Then(etcd.OpDelete(key)).
Commit()
cancel()
if err != nil {
return false, err
}
if len(txn.Responses) == 0 {
return false, store.ErrKeyNotFound
}
if !txn.Succeeded {
return false, store.ErrKeyModified
}
return true, nil
}
// List child nodes of a given directory
func (s *EtcdV3) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
_, kv, err := s.list(directory, opts)
return kv, err
}
// DeleteTree deletes a range of keys under a given directory
func (s *EtcdV3) DeleteTree(directory string) error {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
resp, err := s.client.KV.Delete(ctx, s.normalize(directory), etcd.WithPrefix())
cancel()
if err != nil {
return err
}
if resp.Deleted == 0 {
return store.ErrKeyNotFound
}
return nil
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *EtcdV3) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
var value string
ttl := defaultLockTTL
renewCh := make(chan struct{})
// Apply options on Lock
if options != nil {
if options.Value != nil {
value = string(options.Value)
}
if options.TTL != 0 {
ttl = options.TTL
}
if options.RenewLock != nil {
renewCh = options.RenewLock
}
}
// Create Session for Mutex
session, err := concurrency.NewSession(s.client, concurrency.WithTTL(int(ttl/time.Second)))
if err != nil {
return nil, err
}
go func() {
<-renewCh
session.Close()
}()
// A Mutex is a simple key that can only be held by a single process.
// An etcd mutex behaves like a Zookeeper lock: a side key is created with
// a suffix (such as "_lock") and represents the mutex. Thus we have a pair
// composed of the key to protect with a lock: "/key", and a side key that
// acts as the lock: "/key_lock"
mutexKey := s.normalize(key + lockSuffix)
writeKey := s.normalize(key)
// Create lock object
lock = &etcdLock{
store: s,
mutex: concurrency.NewMutex(session, mutexKey),
session: session,
mutexKey: mutexKey,
writeKey: writeKey,
value: value,
ttl: ttl,
}
return lock, nil
}
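// Editor's illustration (not part of the original file): for the key "dir/key",
// the pair of keys produced by NewLock above would be:
//
// writeKey: "dir/key" (the key protected by the lock)
// mutexKey: "dir/key___lock" (the side key acting as the mutex)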
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
l.lock.Lock()
defer l.lock.Unlock()
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopChan
cancel()
}()
err := l.mutex.Lock(ctx)
if err != nil {
if err == context.Canceled {
return nil, nil
}
return nil, err
}
err = l.store.Put(l.writeKey, []byte(l.value), nil)
if err != nil {
return nil, err
}
return l.session.Done(), nil
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *etcdLock) Unlock() error {
l.lock.Lock()
defer l.lock.Unlock()
return l.mutex.Unlock(context.Background())
}
// Close closes the client connection
func (s *EtcdV3) Close() {
s.client.Close()
}
// list child nodes of a given directory and return revision number
func (s *EtcdV3) list(directory string, opts *store.ReadOptions) (int64, []*store.KVPair, error) {
ctx, cancel := context.WithTimeout(context.Background(), etcdDefaultTimeout)
var resp *etcd.GetResponse
var err error
if opts != nil && !opts.Consistent {
resp, err = s.client.KV.Get(ctx, s.normalize(directory), etcd.WithSerializable(), etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortDescend))
} else {
resp, err = s.client.KV.Get(ctx, s.normalize(directory), etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortDescend))
}
cancel()
if err != nil {
return 0, nil, err
}
if resp.Count == 0 {
return 0, nil, store.ErrKeyNotFound
}
kv := []*store.KVPair{}
for _, n := range resp.Kvs {
if string(n.Key) == directory {
continue
}
// Filter out etcd mutex side keys with `___lock` suffix
if strings.Contains(string(n.Key), lockSuffix) {
continue
}
kv = append(kv, &store.KVPair{
Key: string(n.Key),
Value: []byte(n.Value),
LastIndex: uint64(n.ModRevision),
})
}
return resp.Header.Revision, kv, nil
}

View file

@ -1,593 +0,0 @@
package zookeeper
import (
"strings"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
zk "github.com/samuel/go-zookeeper/zk"
)
const (
// SOH control character
SOH = "\x01"
defaultTimeout = 10 * time.Second
syncRetryLimit = 5
)
// Zookeeper is the receiver type for
// the Store interface
type Zookeeper struct {
timeout time.Duration
client *zk.Conn
}
type zookeeperLock struct {
client *zk.Conn
lock *zk.Lock
key string
value []byte
}
// Register registers zookeeper to valkeyrie
func Register() {
valkeyrie.AddStore(store.ZK, New)
}
// New creates a new Zookeeper client given a
// list of endpoints and an optional tls config
func New(endpoints []string, options *store.Config) (store.Store, error) {
s := &Zookeeper{}
s.timeout = defaultTimeout
// Set options
if options != nil {
if options.ConnectionTimeout != 0 {
s.setTimeout(options.ConnectionTimeout)
}
}
// Connect to Zookeeper
conn, _, err := zk.Connect(endpoints, s.timeout)
if err != nil {
return nil, err
}
s.client = conn
return s, nil
}
// setTimeout sets the timeout for connecting to Zookeeper
func (s *Zookeeper) setTimeout(time time.Duration) {
s.timeout = time
}
// Get the value at "key", returns the last modified index
// to use in conjunction to Atomic calls
func (s *Zookeeper) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
resp, meta, err := s.get(key)
if err != nil {
return nil, err
}
pair = &store.KVPair{
Key: key,
Value: resp,
LastIndex: uint64(meta.Version),
}
return pair, nil
}
// createFullPath creates the entire path for a directory
// that does not exist and sets the value of the last
// znode to data
func (s *Zookeeper) createFullPath(path []string, data []byte, ephemeral bool) error {
for i := 1; i <= len(path); i++ {
newpath := "/" + strings.Join(path[:i], "/")
if i == len(path) {
flag := 0
if ephemeral {
flag = zk.FlagEphemeral
}
_, err := s.client.Create(newpath, data, int32(flag), zk.WorldACL(zk.PermAll))
return err
}
_, err := s.client.Create(newpath, []byte{}, 0, zk.WorldACL(zk.PermAll))
if err != nil {
// Skip if node already exists
if err != zk.ErrNodeExists {
return err
}
}
}
return nil
}
// Put a value at "key"
func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error {
fkey := s.normalize(key)
exists, err := s.Exists(key, nil)
if err != nil {
return err
}
if !exists {
if opts != nil && opts.TTL > 0 {
err = s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), value, true)
} else {
err = s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), value, false)
}
} else {
_, err = s.client.Set(fkey, value, -1)
}
return err
}
// Delete a value at "key"
func (s *Zookeeper) Delete(key string) error {
err := s.client.Delete(s.normalize(key), -1)
if err == zk.ErrNoNode {
return store.ErrKeyNotFound
}
return err
}
// Exists checks if the key exists inside the store
func (s *Zookeeper) Exists(key string, opts *store.ReadOptions) (bool, error) {
exists, _, err := s.client.Exists(s.normalize(key))
if err != nil {
return false, err
}
return exists, nil
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan *store.KVPair)
go func() {
defer close(watchCh)
var fireEvt = true
for {
resp, meta, eventCh, err := s.getW(key)
if err != nil {
return
}
if fireEvt {
watchCh <- &store.KVPair{
Key: key,
Value: resp,
LastIndex: uint64(meta.Version),
}
}
select {
case e := <-eventCh:
// Only fire an event if the data in the node changed.
// Simply reset the watch if this is any other event
// (e.g. a session event).
fireEvt = e.Type == zk.EventNodeDataChanged
case <-stopCh:
// There is no way to stop GetW so just quit
return
}
}
}()
return watchCh, nil
}
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current children's values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan []*store.KVPair)
go func() {
defer close(watchCh)
var fireEvt = true
for {
WATCH:
keys, _, eventCh, err := s.client.ChildrenW(s.normalize(directory))
if err != nil {
return
}
if fireEvt {
kvs, err := s.getListWithPath(directory, keys, opts)
if err != nil {
// Failed to get values for one or more of the keys,
// the list may be out of date so try again.
goto WATCH
}
watchCh <- kvs
}
select {
case e := <-eventCh:
// Only fire an event if the children have changed.
// Simply reset the watch if this is any other event
// (e.g. a session event).
fireEvt = e.Type == zk.EventNodeChildrenChanged
case <-stopCh:
// There is no way to stop ChildrenW so just quit
return
}
}
}()
return watchCh, nil
}
// listChildren lists the direct children of a directory
func (s *Zookeeper) listChildren(directory string) ([]string, error) {
children, _, err := s.client.Children(s.normalize(directory))
if err != nil {
if err == zk.ErrNoNode {
return nil, store.ErrKeyNotFound
}
return nil, err
}
return children, nil
}
// listChildrenRecursive lists the children of a directory as well as
// all descendant children from sub-folders, recursively.
func (s *Zookeeper) listChildrenRecursive(list *[]string, directory string) error {
children, err := s.listChildren(directory)
if err != nil {
return err
}
// We reached a leaf.
if len(children) == 0 {
return nil
}
for _, c := range children {
c = strings.TrimSuffix(directory, "/") + "/" + c
err := s.listChildrenRecursive(list, c)
if err != nil && err != zk.ErrNoChildrenForEphemerals {
return err
}
*list = append(*list, c)
}
return nil
}
// List child nodes of a given directory
func (s *Zookeeper) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
children := make([]string, 0)
err := s.listChildrenRecursive(&children, directory)
if err != nil {
return nil, err
}
kvs, err := s.getList(children, opts)
if err != nil {
// If node is not found: List is out of date, retry
if err == store.ErrKeyNotFound {
return s.List(directory, opts)
}
return nil, err
}
return kvs, nil
}
// DeleteTree deletes a range of keys under a given directory
func (s *Zookeeper) DeleteTree(directory string) error {
children, err := s.listChildren(directory)
if err != nil {
return err
}
var reqs []interface{}
for _, c := range children {
reqs = append(reqs, &zk.DeleteRequest{
Path: s.normalize(directory + "/" + c),
Version: -1,
})
}
_, err = s.client.Multi(reqs...)
return err
}
// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, and returns an error if it has
func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair, _ *store.WriteOptions) (bool, *store.KVPair, error) {
var lastIndex uint64
if previous != nil {
meta, err := s.client.Set(s.normalize(key), value, int32(previous.LastIndex))
if err != nil {
// Compare Failed
if err == zk.ErrBadVersion {
return false, nil, store.ErrKeyModified
}
return false, nil, err
}
lastIndex = uint64(meta.Version)
} else {
// Interpret previous == nil as create operation.
_, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll))
if err != nil {
// Directory does not exist
if err == zk.ErrNoNode {
// Create the directory
parts := store.SplitKey(strings.TrimSuffix(key, "/"))
parts = parts[:len(parts)-1]
if err = s.createFullPath(parts, []byte{}, false); err != nil {
// Failed to create the directory.
return false, nil, err
}
// Create the node
if _, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll)); err != nil {
// Node exists error (when previous is nil)
if err == zk.ErrNodeExists {
return false, nil, store.ErrKeyExists
}
return false, nil, err
}
} else {
// Node exists error (when previous is nil)
if err == zk.ErrNodeExists {
return false, nil, store.ErrKeyExists
}
// Unhandled error
return false, nil, err
}
}
lastIndex = 0 // Newly created nodes have version 0.
}
pair := &store.KVPair{
Key: key,
Value: value,
LastIndex: lastIndex,
}
return true, pair, nil
}
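// Editor's sketch (not part of the original file): with previous == nil,
// AtomicPut behaves as a create and builds missing parent znodes, so a nested
// key can be created in one call. The key below is an illustrative assumption.
func exampleAtomicCreate(s store.Store) error {
ok, pair, err := s.AtomicPut("dir/subdir/key", []byte("v1"), nil, nil)
if err == store.ErrKeyExists {
return nil // another writer created it first
}
if err != nil {
return err
}
_ = ok
_ = pair // pair.LastIndex is 0 for a newly created znode
return nil
}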
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, and returns
// an error if it has
func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
err := s.client.Delete(s.normalize(key), int32(previous.LastIndex))
if err != nil {
// Key not found
if err == zk.ErrNoNode {
return false, store.ErrKeyNotFound
}
// Compare failed
if err == zk.ErrBadVersion {
return false, store.ErrKeyModified
}
// General store error
return false, err
}
return true, nil
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
value := []byte("")
// Apply options
if options != nil {
if options.Value != nil {
value = options.Value
}
}
lock = &zookeeperLock{
client: s.client,
key: s.normalize(key),
value: value,
lock: zk.NewLock(s.client, s.normalize(key), zk.WorldACL(zk.PermAll)),
}
return lock, err
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *zookeeperLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
err := l.lock.Lock()
lostCh := make(chan struct{})
if err == nil {
// We hold the lock, we can set our value
_, err = l.client.Set(l.key, l.value, -1)
if err == nil {
go l.monitorLock(stopChan, lostCh)
}
}
return lostCh, err
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *zookeeperLock) Unlock() error {
return l.lock.Unlock()
}
// Close closes the client connection
func (s *Zookeeper) Close() {
s.client.Close()
}
// Normalize the key for usage in Zookeeper
func (s *Zookeeper) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimSuffix(key, "/")
}
func (l *zookeeperLock) monitorLock(stopCh <-chan struct{}, lostCh chan struct{}) {
defer close(lostCh)
for {
_, _, eventCh, err := l.client.GetW(l.key)
if err != nil {
// We failed to set watch, relinquish the lock
return
}
select {
case e := <-eventCh:
if e.Type == zk.EventNotWatching ||
(e.Type == zk.EventSession && e.State == zk.StateExpired) {
// Either the session has been closed and our watch has been
// invalidated or the session has expired.
return
} else if e.Type == zk.EventNodeDataChanged {
// Someone else has written to the lock node and believes
// that they have the lock.
return
}
case <-stopCh:
// The caller has requested that we relinquish our lock
return
}
}
}
func (s *Zookeeper) get(key string) ([]byte, *zk.Stat, error) {
var resp []byte
var meta *zk.Stat
var err error
// To guard against older versions of valkeyrie
// creating and writing to znodes non-atomically,
// we try to resync a few times if we read SOH or
// an empty string
for i := 0; i <= syncRetryLimit; i++ {
resp, meta, err = s.client.Get(s.normalize(key))
if err != nil {
if err == zk.ErrNoNode {
return nil, nil, store.ErrKeyNotFound
}
return nil, nil, err
}
if string(resp) != SOH && string(resp) != "" {
return resp, meta, nil
}
if i < syncRetryLimit {
if _, err = s.client.Sync(s.normalize(key)); err != nil {
return nil, nil, err
}
}
}
return resp, meta, nil
}
func (s *Zookeeper) getW(key string) ([]byte, *zk.Stat, <-chan zk.Event, error) {
var resp []byte
var meta *zk.Stat
var ech <-chan zk.Event
var err error
// To guard against older versions of valkeyrie
// creating and writing to znodes non-atomically,
// we try to resync a few times if we read SOH or
// an empty string
for i := 0; i <= syncRetryLimit; i++ {
resp, meta, ech, err = s.client.GetW(s.normalize(key))
if err != nil {
if err == zk.ErrNoNode {
return nil, nil, nil, store.ErrKeyNotFound
}
return nil, nil, nil, err
}
if string(resp) != SOH && string(resp) != "" {
return resp, meta, ech, nil
}
if i < syncRetryLimit {
if _, err = s.client.Sync(s.normalize(key)); err != nil {
return nil, nil, nil, err
}
}
}
return resp, meta, ech, nil
}
// getListWithPath gets the key/value pairs for a list of keys under
// a given path.
//
// This is generally used when we get a list of child keys which
// have been stripped of their path prefix (for example when using ChildrenW).
func (s *Zookeeper) getListWithPath(path string, keys []string, opts *store.ReadOptions) ([]*store.KVPair, error) {
kvs := []*store.KVPair{}
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(path, "/")+s.normalize(key), opts)
if err != nil {
return nil, err
}
kvs = append(kvs, &store.KVPair{
Key: key,
Value: pair.Value,
LastIndex: pair.LastIndex,
})
}
return kvs, nil
}
// getList returns key/value pairs from a list of keys.
//
// This is generally used when we have a full list of keys with
// their full path included.
func (s *Zookeeper) getList(keys []string, opts *store.ReadOptions) ([]*store.KVPair, error) {
kvs := []*store.KVPair{}
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(key, "/"), nil)
if err != nil {
return nil, err
}
kvs = append(kvs, &store.KVPair{
Key: key,
Value: pair.Value,
LastIndex: pair.LastIndex,
})
}
return kvs, nil
}

View file

@ -1,35 +0,0 @@
// Package ec2query provides serialization of AWS EC2 requests and responses.
package ec2query
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go
import (
"net/url"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)
// BuildHandler is a named request handler for building ec2query protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build}
// Build builds a request for the EC2 protocol.
func Build(r *request.Request) {
body := url.Values{
"Action": {r.Operation.Name},
"Version": {r.ClientInfo.APIVersion},
}
if err := queryutil.Parse(body, r.Params, true); err != nil {
r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
}
if !r.IsPresigned() {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
} else { // This is a pre-signed request
r.HTTPRequest.Method = "GET"
r.HTTPRequest.URL.RawQuery = body.Encode()
}
}

View file

@ -1,63 +0,0 @@
package ec2query
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go
import (
"encoding/xml"
"io"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol responses
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal}
// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol response metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta}
// UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol response errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError}
// Unmarshal unmarshals a response body for the EC2 protocol.
func Unmarshal(r *request.Request) {
defer r.HTTPResponse.Body.Close()
if r.DataFilled() {
decoder := xml.NewDecoder(r.HTTPResponse.Body)
err := xmlutil.UnmarshalXML(r.Data, decoder, "")
if err != nil {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err)
return
}
}
}
// UnmarshalMeta unmarshals response headers for the EC2 protocol.
func UnmarshalMeta(r *request.Request) {
// TODO implement unmarshaling of request IDs
}
type xmlErrorResponse struct {
XMLName xml.Name `xml:"Response"`
Code string `xml:"Errors>Error>Code"`
Message string `xml:"Errors>Error>Message"`
RequestID string `xml:"RequestID"`
}
// UnmarshalError unmarshals a response error for the EC2 protocol.
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err)
} else {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
resp.RequestID,
)
}
}

File diff suppressed because it is too large

View file

@ -1,109 +0,0 @@
package dynamodb
import (
"bytes"
"hash/crc32"
"io"
"io/ioutil"
"math"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
)
type retryer struct {
client.DefaultRetryer
}
func (d retryer) RetryRules(r *request.Request) time.Duration {
delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50
return delay * time.Millisecond
}
func init() {
initClient = func(c *client.Client) {
if c.Config.Retryer == nil {
// Only override the retryer with a custom one if the config
// does not already contain a retryer
setCustomRetryer(c)
}
c.Handlers.Build.PushBack(disableCompression)
c.Handlers.Unmarshal.PushFront(validateCRC32)
}
}
func setCustomRetryer(c *client.Client) {
maxRetries := aws.IntValue(c.Config.MaxRetries)
if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 10
}
c.Retryer = retryer{
DefaultRetryer: client.DefaultRetryer{
NumMaxRetries: maxRetries,
},
}
}
func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) {
if length < 0 {
length = 0
}
buf := bytes.NewBuffer(make([]byte, 0, length))
if _, err = buf.ReadFrom(b); err != nil {
return nil, err
}
if err = b.Close(); err != nil {
return nil, err
}
return buf, nil
}
func disableCompression(r *request.Request) {
r.HTTPRequest.Header.Set("Accept-Encoding", "identity")
}
func validateCRC32(r *request.Request) {
if r.Error != nil {
return // already have an error, no need to verify CRC
}
// Checksum validation is off, skip
if aws.BoolValue(r.Config.DisableComputeChecksums) {
return
}
// Try to get CRC from response
header := r.HTTPResponse.Header.Get("X-Amz-Crc32")
if header == "" {
return // No header, skip
}
expected, err := strconv.ParseUint(header, 10, 32)
if err != nil {
return // Could not determine CRC value, skip
}
buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength)
if err != nil { // failed to read the response body, skip
return
}
// Reset body for subsequent reads
r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
// Compute the CRC checksum
crc := crc32.ChecksumIEEE(buf.Bytes())
if crc != uint32(expected) {
// CRC does not match, set a retryable error
r.Retryable = aws.Bool(true)
r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil)
}
}
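// Editor's sketch (not part of the original file): the core of validateCRC32
// in isolation, comparing an IEEE CRC32 checksum of the body with the
// X-Amz-Crc32 header value.
func checkCRC32(body []byte, headerValue string) bool {
expected, err := strconv.ParseUint(headerValue, 10, 32)
if err != nil {
return true // no usable header value: skip the check, as above
}
return crc32.ChecksumIEEE(body) == uint32(expected)
}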

View file

@ -1,45 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package dynamodb provides the client and types for making API
// requests to Amazon DynamoDB.
//
// Amazon DynamoDB is a fully managed NoSQL database service that provides fast
// and predictable performance with seamless scalability. DynamoDB lets you
// offload the administrative burdens of operating and scaling a distributed
// database, so that you don't have to worry about hardware provisioning, setup
// and configuration, replication, software patching, or cluster scaling.
//
// With DynamoDB, you can create database tables that can store and retrieve
// any amount of data, and serve any level of request traffic. You can scale
// up or scale down your tables' throughput capacity without downtime or performance
// degradation, and use the AWS Management Console to monitor resource utilization
// and performance metrics.
//
// DynamoDB automatically spreads the data and traffic for your tables over
// a sufficient number of servers to handle your throughput and storage requirements,
// while maintaining consistent and fast performance. All of your data is stored
// on solid state disks (SSDs) and automatically replicated across multiple
// Availability Zones in an AWS region, providing built-in high availability
// and data durability.
//
// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service.
//
// See dynamodb package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/
//
// Using the Client
//
// To contact Amazon DynamoDB with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon DynamoDB client DynamoDB for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New
package dynamodb
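// Editor's sketch (not part of the original file): the "Using the Client"
// section above in code form, assuming the standard aws-sdk-go session
// package; the region and credential resolution are illustrative assumptions.
//
// import (
// "github.com/aws/aws-sdk-go/aws"
// "github.com/aws/aws-sdk-go/aws/session"
// "github.com/aws/aws-sdk-go/service/dynamodb"
// )
//
// sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
// svc := dynamodb.New(sess)
// // svc is safe to use concurrently across goroutines.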

View file

@ -1,27 +0,0 @@
/*
AttributeValue Marshaling and Unmarshaling Helpers
Utility helpers to marshal and unmarshal AttributeValue to and
from Go types can be found in the dynamodbattribute sub package. This package
provides specialized functions for the common ways of working with
AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and
directly with *AttributeValue. This is helpful for marshaling Go types for API
operations such as PutItem, and unmarshaling Query and Scan APIs' responses.
See the dynamodbattribute package documentation for more information.
https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/
Expression Builders
The expression package provides utility types and functions to build DynamoDB
expressions for type-safe construction of API ExpressionAttributeNames and
ExpressionAttributeValues.
The package represents the various DynamoDB Expressions as structs named
accordingly. For example, ConditionBuilder represents a DynamoDB Condition
Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on.
See the expression package documentation for more information.
https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/
*/
package dynamodb

View file

@ -1,443 +0,0 @@
package dynamodbattribute
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"runtime"
"strconv"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// ConvertToMap accepts a map[string]interface{} or struct and converts it to a
// map[string]*dynamodb.AttributeValue.
//
// If in contains any structs, it is first JSON encoded/decoded to convert it
// to a map[string]interface{}, so `json` struct tags are respected.
//
// Deprecated: Use MarshalMap instead
func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok {
err = e
} else if s, ok := r.(string); ok {
err = errors.New(s)
} else {
err = r.(error)
}
item = nil
}
}()
if in == nil {
return nil, awserr.New("SerializationError",
"in must be a map[string]interface{} or struct, got <nil>", nil)
}
v := reflect.ValueOf(in)
if v.Kind() != reflect.Struct && !(v.Kind() == reflect.Map && v.Type().Key().Kind() == reflect.String) {
return nil, awserr.New("SerializationError",
fmt.Sprintf("in must be a map[string]interface{} or struct, got %s",
v.Type().String()),
nil)
}
if isTyped(reflect.TypeOf(in)) {
var out map[string]interface{}
in = convertToUntyped(in, out)
}
item = make(map[string]*dynamodb.AttributeValue)
for k, v := range in.(map[string]interface{}) {
item[k] = convertTo(v)
}
return item, nil
}
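// Editor's sketch (not part of the original file): the MarshalMap replacement
// recommended by the deprecation notice above. The Record type is an
// illustrative assumption.
//
// type Record struct {
// ID    string `json:"id"`
// Count int    `json:"count"`
// }
//
// item, err := MarshalMap(Record{ID: "a", Count: 1})
// // item is a map[string]*dynamodb.AttributeValue suitable for a PutItem input.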
// ConvertFromMap accepts a map[string]*dynamodb.AttributeValue and converts it to a
// map[string]interface{} or struct.
//
// If v points to a struct, the result is first converted to a
// map[string]interface{}, then JSON encoded/decoded to convert to a struct,
// so `json` struct tags are respected.
//
// Deprecated: Use UnmarshalMap instead
func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok {
err = e
} else if s, ok := r.(string); ok {
err = errors.New(s)
} else {
err = r.(error)
}
item = nil
}
}()
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return awserr.New("SerializationError",
fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s",
rv.Type()),
nil)
}
if rv.Elem().Kind() != reflect.Struct && !(rv.Elem().Kind() == reflect.Map && rv.Elem().Type().Key().Kind() == reflect.String) {
return awserr.New("SerializationError",
fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s",
rv.Type()),
nil)
}
m := make(map[string]interface{})
for k, v := range item {
m[k] = convertFrom(v)
}
if isTyped(reflect.TypeOf(v)) {
err = convertToTyped(m, v)
} else {
rv.Elem().Set(reflect.ValueOf(m))
}
return err
}
// ConvertToList accepts an array or slice and converts it to a
// []*dynamodb.AttributeValue.
//
// Converting []byte fields to dynamodb.AttributeValue are only currently supported
// if the input is a map[string]interface{} type. []byte within typed structs are not
// converted correctly and are converted into base64 strings. This is a known bug,
// and will be fixed in a later release.
//
// If in contains any structs, it is first JSON encoded/decoded to convert it
// to a []interface{}, so `json` struct tags are respected.
//
// Deprecated: Use MarshalList instead
func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok {
err = e
} else if s, ok := r.(string); ok {
err = errors.New(s)
} else {
err = r.(error)
}
item = nil
}
}()
if in == nil {
return nil, awserr.New("SerializationError",
"in must be an array or slice, got <nil>",
nil)
}
v := reflect.ValueOf(in)
if v.Kind() != reflect.Array && v.Kind() != reflect.Slice {
return nil, awserr.New("SerializationError",
fmt.Sprintf("in must be an array or slice, got %s",
v.Type().String()),
nil)
}
if isTyped(reflect.TypeOf(in)) {
var out []interface{}
in = convertToUntyped(in, out)
}
item = make([]*dynamodb.AttributeValue, 0, len(in.([]interface{})))
for _, v := range in.([]interface{}) {
item = append(item, convertTo(v))
}
return item, nil
}
// ConvertFromList accepts a []*dynamodb.AttributeValue and converts it to an array or
// slice.
//
// If v contains any structs, the result is first converted to a
// []interface{}, then JSON encoded/decoded to convert to a typed array or
// slice, so `json` struct tags are respected.
//
// Deprecated: Use UnmarshalList instead
func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok {
err = e
} else if s, ok := r.(string); ok {
err = fmt.Errorf(s)
} else {
err = r.(error)
}
item = nil
}
}()
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return awserr.New("SerializationError",
fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s",
rv.Type()),
nil)
}
if rv.Elem().Kind() != reflect.Array && rv.Elem().Kind() != reflect.Slice {
return awserr.New("SerializationError",
fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s",
rv.Type()),
nil)
}
l := make([]interface{}, 0, len(item))
for _, v := range item {
l = append(l, convertFrom(v))
}
if isTyped(reflect.TypeOf(v)) {
err = convertToTyped(l, v)
} else {
rv.Elem().Set(reflect.ValueOf(l))
}
return err
}
// ConvertTo accepts any interface{} and converts it to a *dynamodb.AttributeValue.
//
// If in contains any structs, it is first JSON encoded/decoded to convert it
// to an interface{}, so `json` struct tags are respected.
//
// Deprecated: Use Marshal instead
func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok {
err = e
} else if s, ok := r.(string); ok {
err = errors.New(s)
} else {
err = r.(error)
}
item = nil
}
}()
if in != nil && isTyped(reflect.TypeOf(in)) {
var out interface{}
in = convertToUntyped(in, out)
}
item = convertTo(in)
return item, nil
}
// ConvertFrom accepts a *dynamodb.AttributeValue and converts it to any interface{}.
//
// If v contains any structs, the result is first converted to an interface{},
// then JSON encoded/decoded to convert to a struct, so `json` struct tags
// are respected.
//
// Deprecated: Use Unmarshal instead
func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok {
err = e
} else if s, ok := r.(string); ok {
err = fmt.Errorf(s)
} else {
err = r.(error)
}
item = nil
}
}()
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return awserr.New("SerializationError",
fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s",
rv.Type()),
nil)
}
if rv.Elem().Kind() != reflect.Interface && rv.Elem().Kind() != reflect.Struct {
return awserr.New("SerializationError",
fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s",
rv.Type()),
nil)
}
res := convertFrom(item)
if isTyped(reflect.TypeOf(v)) {
err = convertToTyped(res, v)
} else if res != nil {
rv.Elem().Set(reflect.ValueOf(res))
}
return err
}
func isTyped(v reflect.Type) bool {
switch v.Kind() {
case reflect.Struct:
return true
case reflect.Array, reflect.Slice:
if isTyped(v.Elem()) {
return true
}
case reflect.Map:
if isTyped(v.Key()) {
return true
}
if isTyped(v.Elem()) {
return true
}
case reflect.Ptr:
return isTyped(v.Elem())
}
return false
}
func convertToUntyped(in, out interface{}) interface{} {
b, err := json.Marshal(in)
if err != nil {
panic(err)
}
decoder := json.NewDecoder(bytes.NewReader(b))
decoder.UseNumber()
err = decoder.Decode(&out)
if err != nil {
panic(err)
}
return out
}
func convertToTyped(in, out interface{}) error {
b, err := json.Marshal(in)
if err != nil {
return err
}
decoder := json.NewDecoder(bytes.NewReader(b))
return decoder.Decode(&out)
}
func convertTo(in interface{}) *dynamodb.AttributeValue {
a := &dynamodb.AttributeValue{}
if in == nil {
a.NULL = new(bool)
*a.NULL = true
return a
}
if m, ok := in.(map[string]interface{}); ok {
a.M = make(map[string]*dynamodb.AttributeValue)
for k, v := range m {
a.M[k] = convertTo(v)
}
return a
}
v := reflect.ValueOf(in)
switch v.Kind() {
case reflect.Bool:
a.BOOL = new(bool)
*a.BOOL = v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
a.N = new(string)
*a.N = strconv.FormatInt(v.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
a.N = new(string)
*a.N = strconv.FormatUint(v.Uint(), 10)
case reflect.Float32, reflect.Float64:
a.N = new(string)
*a.N = strconv.FormatFloat(v.Float(), 'f', -1, 64)
case reflect.String:
if n, ok := in.(json.Number); ok {
a.N = new(string)
*a.N = n.String()
} else {
a.S = new(string)
*a.S = v.String()
}
case reflect.Slice:
switch v.Type() {
case reflect.TypeOf(([]byte)(nil)):
a.B = v.Bytes()
default:
a.L = make([]*dynamodb.AttributeValue, v.Len())
for i := 0; i < v.Len(); i++ {
a.L[i] = convertTo(v.Index(i).Interface())
}
}
default:
panic(fmt.Sprintf("the type %s is not supported", v.Type().String()))
}
return a
}
func convertFrom(a *dynamodb.AttributeValue) interface{} {
if a.S != nil {
return *a.S
}
if a.N != nil {
// Number is tricky because we don't know which numeric type to use. Here we
// simply try the different types from most to least restrictive.
if n, err := strconv.ParseInt(*a.N, 10, 64); err == nil {
return int(n)
}
if n, err := strconv.ParseUint(*a.N, 10, 64); err == nil {
return uint(n)
}
n, err := strconv.ParseFloat(*a.N, 64)
if err != nil {
panic(err)
}
return n
}
if a.BOOL != nil {
return *a.BOOL
}
if a.NULL != nil {
return nil
}
if a.M != nil {
m := make(map[string]interface{})
for k, v := range a.M {
m[k] = convertFrom(v)
}
return m
}
if a.L != nil {
l := make([]interface{}, len(a.L))
for index, v := range a.L {
l[index] = convertFrom(v)
}
return l
}
if a.B != nil {
return a.B
}
panic(fmt.Sprintf("%#v is not a supported dynamodb.AttributeValue", a))
}

View file

@ -1,761 +0,0 @@
package dynamodbattribute
import (
"fmt"
"reflect"
"strconv"
"time"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// An Unmarshaler is an interface to provide custom unmarshaling of
// AttributeValues. Use this to provide custom logic determining
// how AttributeValues should be unmarshaled.
// type ExampleUnmarshaler struct {
// Value int
// }
//
// func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
// if av.N == nil {
// return nil
// }
//
// n, err := strconv.ParseInt(*av.N, 10, 0)
// if err != nil {
// return err
// }
//
// u.Value = int(n)
// return nil
// }
type Unmarshaler interface {
UnmarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
}
// Unmarshal will unmarshal DynamoDB AttributeValues to Go value types.
// Both generic interface{} and concrete types are valid unmarshal
// destination types.
//
// Unmarshal will allocate maps, slices, and pointers as needed to
// unmarshal the AttributeValue into the provided type value.
//
// When unmarshaling AttributeValues into structs Unmarshal matches
// the field names of the struct to the AttributeValue Map keys.
// Initially it will look for exact field name matching, but will
// fall back to case-insensitive matching if no exact match is found.
//
// With the exception of omitempty, omitemptyelem, binaryset, numberset
// and stringset all struct tags used by Marshal are also used by
// Unmarshal.
//
// When decoding AttributeValues to interfaces Unmarshal will use the
// following types.
//
// []byte, AV Binary (B)
// [][]byte, AV Binary Set (BS)
// bool, AV Boolean (BOOL)
// []interface{}, AV List (L)
// map[string]interface{}, AV Map (M)
// float64, AV Number (N)
// Number, AV Number (N) with UseNumber set
// []float64, AV Number Set (NS)
// []Number, AV Number Set (NS) with UseNumber set
// string, AV String (S)
// []string, AV String Set (SS)
//
// If the Decoder option UseNumber is set, numbers will be unmarshaled
// as Number values instead of float64. Use this to maintain the original
// string formatting of the number as it was represented in the AttributeValue.
// In addition, this provides further opportunities to parse the number
// string based on individual use cases.
//
// When unmarshaling, any error that occurs will halt the unmarshal
// and be returned.
//
// The output value provided must be a non-nil pointer
func Unmarshal(av *dynamodb.AttributeValue, out interface{}) error {
return NewDecoder().Decode(av, out)
}
// UnmarshalMap is an alias for Unmarshal which unmarshals from
// a map of AttributeValues.
//
// The output value provided must be a non-nil pointer
func UnmarshalMap(m map[string]*dynamodb.AttributeValue, out interface{}) error {
return NewDecoder().Decode(&dynamodb.AttributeValue{M: m}, out)
}
// UnmarshalList is an alias for Unmarshal func which unmarshals
// a slice of AttributeValues.
//
// The output value provided must be a non-nil pointer
func UnmarshalList(l []*dynamodb.AttributeValue, out interface{}) error {
return NewDecoder().Decode(&dynamodb.AttributeValue{L: l}, out)
}
// UnmarshalListOfMaps is an alias for Unmarshal func which unmarshals a
// slice of maps of attribute values.
//
// This is useful for when you need to unmarshal the Items from a DynamoDB
// Query API call.
//
// The output value provided must be a non-nil pointer
func UnmarshalListOfMaps(l []map[string]*dynamodb.AttributeValue, out interface{}) error {
items := make([]*dynamodb.AttributeValue, len(l))
for i, m := range l {
items[i] = &dynamodb.AttributeValue{M: m}
}
return UnmarshalList(items, out)
}
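// Editor's sketch (not part of the original file): decoding the Items of a
// Query response with UnmarshalListOfMaps, as described above. The input and
// output shapes are illustrative assumptions.
func exampleQueryDecode(svc *dynamodb.DynamoDB, in *dynamodb.QueryInput, out interface{}) error {
res, err := svc.Query(in)
if err != nil {
return err
}
// res.Items is a []map[string]*dynamodb.AttributeValue.
return UnmarshalListOfMaps(res.Items, out)
}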
// A Decoder provides unmarshaling AttributeValues to Go value types.
type Decoder struct {
MarshalOptions
// Instructs the decoder to decode AttributeValue Numbers as
// Number type instead of float64 when the destination type
// is interface{}. Similar to encoding/json.Number
UseNumber bool
}
// NewDecoder creates a new Decoder with default configuration. Use
// the `opts` functional options to override the default configuration.
func NewDecoder(opts ...func(*Decoder)) *Decoder {
d := &Decoder{
MarshalOptions: MarshalOptions{
SupportJSONTags: true,
},
}
for _, o := range opts {
o(d)
}
return d
}
// Decode will unmarshal an AttributeValue into a Go value type. An error
// will be returned if the decoder is unable to unmarshal the AttributeValue
// into the provided Go value type.
//
// The output value provided must be a non-nil pointer
func (d *Decoder) Decode(av *dynamodb.AttributeValue, out interface{}, opts ...func(*Decoder)) error {
v := reflect.ValueOf(out)
if v.Kind() != reflect.Ptr || v.IsNil() || !v.IsValid() {
return &InvalidUnmarshalError{Type: reflect.TypeOf(out)}
}
return d.decode(av, v, tag{})
}
var stringInterfaceMapType = reflect.TypeOf(map[string]interface{}(nil))
var byteSliceType = reflect.TypeOf([]byte(nil))
var byteSliceSlicetype = reflect.TypeOf([][]byte(nil))
var numberType = reflect.TypeOf(Number(""))
var timeType = reflect.TypeOf(time.Time{})
func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
var u Unmarshaler
if av == nil || av.NULL != nil {
u, v = indirect(v, true)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(av)
}
return d.decodeNull(v)
}
u, v = indirect(v, false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(av)
}
switch {
case len(av.B) != 0:
return d.decodeBinary(av.B, v)
case av.BOOL != nil:
return d.decodeBool(av.BOOL, v)
case len(av.BS) != 0:
return d.decodeBinarySet(av.BS, v)
case len(av.L) != 0:
return d.decodeList(av.L, v)
case len(av.M) != 0:
return d.decodeMap(av.M, v)
case av.N != nil:
return d.decodeNumber(av.N, v, fieldTag)
case len(av.NS) != 0:
return d.decodeNumberSet(av.NS, v)
case av.S != nil:
return d.decodeString(av.S, v, fieldTag)
case len(av.SS) != 0:
return d.decodeStringSet(av.SS, v)
}
return nil
}
func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
if v.Kind() == reflect.Interface {
buf := make([]byte, len(b))
copy(buf, b)
v.Set(reflect.ValueOf(buf))
return nil
}
if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
}
if v.Type() == byteSliceType {
// Optimization for []byte types
if v.IsNil() || v.Cap() < len(b) {
v.Set(reflect.MakeSlice(byteSliceType, len(b), len(b)))
} else if v.Len() != len(b) {
v.SetLen(len(b))
}
copy(v.Interface().([]byte), b)
return nil
}
switch v.Type().Elem().Kind() {
case reflect.Uint8:
// Fall back to reflection copy for types aliased to []byte
if v.Kind() != reflect.Array && (v.IsNil() || v.Cap() < len(b)) {
v.Set(reflect.MakeSlice(v.Type(), len(b), len(b)))
} else if v.Len() != len(b) {
v.SetLen(len(b))
}
for i := 0; i < len(b); i++ {
v.Index(i).SetUint(uint64(b[i]))
}
default:
if v.Kind() == reflect.Array {
switch v.Type().Elem().Kind() {
case reflect.Uint8:
reflect.Copy(v, reflect.ValueOf(b))
default:
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
}
break
}
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeBool(b *bool, v reflect.Value) error {
switch v.Kind() {
case reflect.Bool, reflect.Interface:
v.Set(reflect.ValueOf(*b).Convert(v.Type()))
default:
return &UnmarshalTypeError{Value: "bool", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(bs) {
// What about if ignoring nil/empty values?
v.Set(reflect.MakeSlice(v.Type(), 0, len(bs)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
set := make([][]byte, len(bs))
for i, b := range bs {
if err := d.decodeBinary(b, reflect.ValueOf(&set[i]).Elem()); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
return nil
default:
return &UnmarshalTypeError{Value: "binary set", Type: v.Type()}
}
for i := 0; i < v.Cap() && i < len(bs); i++ {
if !isArray {
v.SetLen(i + 1)
}
u, elem := indirect(v.Index(i), false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{BS: bs})
}
if err := d.decodeBinary(bs[i], elem); err != nil {
return err
}
}
return nil
}
func (d *Decoder) decodeNumber(n *string, v reflect.Value, fieldTag tag) error {
switch v.Kind() {
case reflect.Interface:
i, err := d.decodeNumberToInterface(n)
if err != nil {
return err
}
v.Set(reflect.ValueOf(i))
return nil
case reflect.String:
if v.Type() == numberType { // Support Number value type
v.Set(reflect.ValueOf(Number(*n)))
return nil
}
v.Set(reflect.ValueOf(*n))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
i, err := strconv.ParseInt(*n, 10, 64)
if err != nil {
return err
}
if v.OverflowInt(i) {
return &UnmarshalTypeError{
Value: fmt.Sprintf("number overflow, %s", *n),
Type: v.Type(),
}
}
v.SetInt(i)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
i, err := strconv.ParseUint(*n, 10, 64)
if err != nil {
return err
}
if v.OverflowUint(i) {
return &UnmarshalTypeError{
Value: fmt.Sprintf("number overflow, %s", *n),
Type: v.Type(),
}
}
v.SetUint(i)
case reflect.Float32, reflect.Float64:
i, err := strconv.ParseFloat(*n, 64)
if err != nil {
return err
}
if v.OverflowFloat(i) {
return &UnmarshalTypeError{
Value: fmt.Sprintf("number overflow, %s", *n),
Type: v.Type(),
}
}
v.SetFloat(i)
default:
if v.Type().ConvertibleTo(timeType) && fieldTag.AsUnixTime {
t, err := decodeUnixTime(*n)
if err != nil {
return err
}
v.Set(reflect.ValueOf(t).Convert(v.Type()))
return nil
}
return &UnmarshalTypeError{Value: "number", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeNumberToInterface(n *string) (interface{}, error) {
if d.UseNumber {
return Number(*n), nil
}
// Default to float64 for all numbers
return strconv.ParseFloat(*n, 64)
}
func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(ns) {
// What about if ignoring nil/empty values?
v.Set(reflect.MakeSlice(v.Type(), 0, len(ns)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
if d.UseNumber {
set := make([]Number, len(ns))
for i, n := range ns {
if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
} else {
set := make([]float64, len(ns))
for i, n := range ns {
if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
}
return nil
default:
return &UnmarshalTypeError{Value: "number set", Type: v.Type()}
}
for i := 0; i < v.Cap() && i < len(ns); i++ {
if !isArray {
v.SetLen(i + 1)
}
u, elem := indirect(v.Index(i), false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{NS: ns})
}
if err := d.decodeNumber(ns[i], elem, tag{}); err != nil {
return err
}
}
return nil
}
func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(avList) {
// What about if ignoring nil/empty values?
v.Set(reflect.MakeSlice(v.Type(), 0, len(avList)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
s := make([]interface{}, len(avList))
for i, av := range avList {
if err := d.decode(av, reflect.ValueOf(&s[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(s))
return nil
default:
return &UnmarshalTypeError{Value: "list", Type: v.Type()}
}
// v is a slice or an array: decode elements into place up to its capacity
for i := 0; i < v.Cap() && i < len(avList); i++ {
if !isArray {
v.SetLen(i + 1)
}
if err := d.decode(avList[i], v.Index(i), tag{}); err != nil {
return err
}
}
return nil
}
func (d *Decoder) decodeMap(avMap map[string]*dynamodb.AttributeValue, v reflect.Value) error {
switch v.Kind() {
case reflect.Map:
t := v.Type()
if t.Key().Kind() != reflect.String {
return &UnmarshalTypeError{Value: "map string key", Type: t.Key()}
}
if v.IsNil() {
v.Set(reflect.MakeMap(t))
}
case reflect.Struct:
case reflect.Interface:
v.Set(reflect.MakeMap(stringInterfaceMapType))
v = v.Elem()
default:
return &UnmarshalTypeError{Value: "map", Type: v.Type()}
}
if v.Kind() == reflect.Map {
for k, av := range avMap {
key := reflect.ValueOf(k)
elem := reflect.New(v.Type().Elem()).Elem()
if err := d.decode(av, elem, tag{}); err != nil {
return err
}
v.SetMapIndex(key, elem)
}
} else if v.Kind() == reflect.Struct {
fields := unionStructFields(v.Type(), d.MarshalOptions)
for k, av := range avMap {
if f, ok := fieldByName(fields, k); ok {
fv := fieldByIndex(v, f.Index, func(v *reflect.Value) bool {
v.Set(reflect.New(v.Type().Elem()))
return true // to continue the loop.
})
if err := d.decode(av, fv, f.tag); err != nil {
return err
}
}
}
}
return nil
}
func (d *Decoder) decodeNull(v reflect.Value) error {
if v.IsValid() && v.CanSet() {
v.Set(reflect.Zero(v.Type()))
}
return nil
}
func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error {
if fieldTag.AsString {
return d.decodeNumber(s, v, fieldTag)
}
// To maintain backwards compatibility with the ConvertFrom family of methods,
// which converted strings to time.Time structs
if v.Type().ConvertibleTo(timeType) {
t, err := time.Parse(time.RFC3339, *s)
if err != nil {
return err
}
v.Set(reflect.ValueOf(t).Convert(v.Type()))
return nil
}
switch v.Kind() {
case reflect.String:
v.SetString(*s)
case reflect.Interface:
// Ensure type aliasing is handled properly
v.Set(reflect.ValueOf(*s).Convert(v.Type()))
default:
return &UnmarshalTypeError{Value: "string", Type: v.Type()}
}
return nil
}
func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error {
isArray := false
switch v.Kind() {
case reflect.Slice:
// Make room for the slice elements if needed
if v.IsNil() || v.Cap() < len(ss) {
v.Set(reflect.MakeSlice(v.Type(), 0, len(ss)))
}
case reflect.Array:
// Limited to capacity of existing array.
isArray = true
case reflect.Interface:
set := make([]string, len(ss))
for i, s := range ss {
if err := d.decodeString(s, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
return err
}
}
v.Set(reflect.ValueOf(set))
return nil
default:
return &UnmarshalTypeError{Value: "string set", Type: v.Type()}
}
for i := 0; i < v.Cap() && i < len(ss); i++ {
if !isArray {
v.SetLen(i + 1)
}
u, elem := indirect(v.Index(i), false)
if u != nil {
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{SS: ss})
}
if err := d.decodeString(ss[i], elem, tag{}); err != nil {
return err
}
}
return nil
}
func decodeUnixTime(n string) (time.Time, error) {
v, err := strconv.ParseInt(n, 10, 64)
if err != nil {
return time.Time{}, &UnmarshalError{
Err: err, Value: n, Type: timeType,
}
}
return time.Unix(v, 0), nil
}
// indirect will walk a value's interface or pointer value types, returning
// the final value, or the value an unmarshaler is defined on.
//
// Based on the encoding/json reflect value type indirection in the Go Stdlib
// https://golang.org/src/encoding/json/decode.go indirect func.
func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(Unmarshaler); ok {
return u, reflect.Value{}
}
}
v = v.Elem()
}
return nil, v
}
// A Number represents an AttributeValue number literal.
type Number string
// Float64 attempts to cast the number to a float64, returning
// the result of the cast or an error if the cast failed.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 attempts to cast the number to an int64, returning
// the result of the cast or an error if the cast failed.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
// Uint64 attempts to cast the number to a uint64, returning
// the result of the cast or an error if the cast failed.
func (n Number) Uint64() (uint64, error) {
return strconv.ParseUint(string(n), 10, 64)
}
// String returns the raw number represented as a string.
func (n Number) String() string {
return string(n)
}
type emptyOrigError struct{}
func (e emptyOrigError) OrigErr() error {
return nil
}
// An UnmarshalTypeError is an error type representing an error
// unmarshaling the AttributeValue's element to a Go value type.
// Includes details about the AttributeValue type and Go value type.
type UnmarshalTypeError struct {
emptyOrigError
Value string
Type reflect.Type
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *UnmarshalTypeError) Error() string {
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *UnmarshalTypeError) Code() string {
return "UnmarshalTypeError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *UnmarshalTypeError) Message() string {
return "cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
// An InvalidUnmarshalError is an error type representing an invalid type
// encountered while unmarshaling an AttributeValue to a Go value type.
type InvalidUnmarshalError struct {
emptyOrigError
Type reflect.Type
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *InvalidUnmarshalError) Error() string {
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *InvalidUnmarshalError) Code() string {
return "InvalidUnmarshalError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *InvalidUnmarshalError) Message() string {
if e.Type == nil {
return "cannot unmarshal to nil value"
}
if e.Type.Kind() != reflect.Ptr {
return "cannot unmarshal to non-pointer value, got " + e.Type.String()
}
return "cannot unmarshal to nil value, " + e.Type.String()
}
// An UnmarshalError wraps an error that occurred while unmarshaling a DynamoDB
// AttributeValue element into a Go type. This is different from UnmarshalTypeError
// in that it wraps the underlying error that occurred.
type UnmarshalError struct {
Err error
Value string
Type reflect.Type
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *UnmarshalError) Error() string {
return fmt.Sprintf("%s: %s\ncaused by: %v", e.Code(), e.Message(), e.Err)
}
// OrigErr returns the original error that caused this issue.
func (e UnmarshalError) OrigErr() error {
return e.Err
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *UnmarshalError) Code() string {
return "UnmarshalError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *UnmarshalError) Message() string {
return fmt.Sprintf("cannot unmarshal %q into %s.",
e.Value, e.Type.String())
}

View file

@ -1,95 +0,0 @@
// Package dynamodbattribute provides marshaling and unmarshaling utilities to
// convert between Go types and dynamodb.AttributeValues.
//
// These utilities allow you to marshal slices, maps, structs, and scalar values
// to and from dynamodb.AttributeValue. These are useful when marshaling
// Go value types to dynamodb.AttributeValue for DynamoDB requests, or
// unmarshaling the dynamodb.AttributeValue back into a Go value type.
//
// AttributeValue Marshaling
//
// To marshal a Go type to a dynamodb.AttributeValue you can use the Marshal
// functions in the dynamodbattribute package. There are specialized versions
// of these functions for collections of AttributeValue, such as maps and lists.
//
// The following example uses MarshalMap to convert the Record Go type to a
// dynamodb.AttributeValue type and use the value to make a PutItem API request.
//
// type Record struct {
// ID string
// URLs []string
// }
//
// //...
//
// r := Record{
// ID: "ABC123",
// URLs: []string{
// "https://example.com/first/link",
// "https://example.com/second/url",
// },
// }
// av, err := dynamodbattribute.MarshalMap(r)
// if err != nil {
// panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err))
// }
//
// _, err = svc.PutItem(&dynamodb.PutItemInput{
// TableName: aws.String(myTableName),
// Item: av,
// })
// if err != nil {
// panic(fmt.Sprintf("failed to put Record to DynamoDB, %v", err))
// }
//
// AttributeValue Unmarshaling
//
// To unmarshal a dynamodb.AttributeValue to a Go type you can use the Unmarshal
// functions in the dynamodbattribute package. There are specialized versions
// of these functions for collections of AttributeValue, such as maps and lists.
//
// The following example unmarshals the result of DynamoDB's Scan API operation.
// The Items returned by the operation will be unmarshaled into a slice of the
// Record Go type.
//
// type Record struct {
// ID string
// URLs []string
// }
//
// //...
//
// var records []Record
//
// // Use the ScanPages method to perform the scan with pagination. Use
// // just the Scan method to make the API call without pagination.
// err := svc.ScanPages(&dynamodb.ScanInput{
// TableName: aws.String(myTableName),
// }, func(page *dynamodb.ScanOutput, last bool) bool {
// recs := []Record{}
//
// err := dynamodbattribute.UnmarshalListOfMaps(page.Items, &recs)
// if err != nil {
// panic(fmt.Sprintf("failed to unmarshal Dynamodb Scan Items, %v", err))
// }
//
// records = append(records, recs...)
//
// return true // keep paging
// })
//
// The ConvertTo, ConvertToList, ConvertToMap, ConvertFrom, ConvertFromMap
// and ConvertFromList methods have been deprecated. The Marshal and Unmarshal
// functions should be used instead. The ConvertTo|From marshalers do not
// support BinarySet, NumberSet, or StringSet, and will incorrectly marshal
// binary data fields in structs as base64 strings.
//
// The Marshal and Unmarshal functions correct this behavior, and remove
// the reliance on encoding/json. `json` struct tags are still supported. In
// addition, support for a new struct tag `dynamodbav` was added. Support for
// the json.Marshaler and json.Unmarshaler interfaces has been removed and
// replaced with the dynamodbattribute.Marshaler and
// dynamodbattribute.Unmarshaler interfaces.
//
// `time.Time` is marshaled as RFC3339 format.
package dynamodbattribute

View file

@ -1,641 +0,0 @@
package dynamodbattribute
import (
"fmt"
"reflect"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// A UnixTime provides aliasing of time.Time into a type that, when marshaled
// and unmarshaled with DynamoDB AttributeValues, is represented as a number
// of seconds since January 1, 1970 UTC instead of a string.
//
// This type is useful as an alternative to the struct tag `unixtime` when you
// want to have your time value marshaled as Unix time in seconds instead of
// the default time.RFC3339.
//
// Important to note that zero value time as unixtime is not 0 seconds
// from January 1, 1970 UTC, but -62135596800, which is the number of seconds
// between January 1, 0001 UTC, and January 1, 1970 UTC.
type UnixTime time.Time
// MarshalDynamoDBAttributeValue implements the Marshaler interface so that
// the UnixTime can be marshaled to a DynamoDB AttributeValue number
// value encoded in the number of seconds since January 1, 1970 UTC.
func (e UnixTime) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
t := time.Time(e)
s := strconv.FormatInt(t.Unix(), 10)
av.N = &s
return nil
}
// UnmarshalDynamoDBAttributeValue implements the Unmarshaler interface so that
// the UnixTime can be unmarshaled from a DynamoDB AttributeValue number representing
// the number of seconds since January 1, 1970 UTC.
//
// If an error occurs parsing the AttributeValue number, an UnmarshalError
// will be returned.
func (e *UnixTime) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
t, err := decodeUnixTime(aws.StringValue(av.N))
if err != nil {
return err
}
*e = UnixTime(t)
return nil
}
// A Marshaler is an interface to provide custom marshaling of Go value types
// to AttributeValues. Use this to provide custom logic determining how a
// Go value type should be marshaled.
//
// type ExampleMarshaler struct {
// Value int
// }
// func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
// n := fmt.Sprintf("%v", m.Value)
// av.N = &n
// return nil
// }
//
type Marshaler interface {
MarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
}
// Marshal will serialize the passed in Go value type into a DynamoDB AttributeValue
// type. This value can be used in DynamoDB API operations to simplify marshaling
// your Go value types into AttributeValues.
//
// Marshal will recursively traverse the passed in value marshaling its
// contents into an AttributeValue. Marshal supports basic scalars
// (int,uint,float,bool,string), maps, slices, and structs. Anonymous
// nested types are flattened based on Go anonymous type visibility.
//
// Marshaling slices to AttributeValue will default to a List for all
// types except for []byte and [][]byte. []byte will be marshaled as
// Binary data (B), and [][]byte will be marshaled as binary data set
// (BS).
//
// The `dynamodbav` struct tag can be used to control how the value will be
// marshaled into an AttributeValue.
//
// // Field is ignored
// Field int `dynamodbav:"-"`
//
// // Field AttributeValue map key "myName"
// Field int `dynamodbav:"myName"`
//
// // Field AttributeValue map key "myName", and
// // Field is omitted if it is empty
// Field int `dynamodbav:"myName,omitempty"`
//
// // Field AttributeValue map key "Field", and
// // Field is omitted if it is empty
// Field int `dynamodbav:",omitempty"`
//
// // Field's elems will be omitted if empty
// // only valid for slices, and maps.
// Field []string `dynamodbav:",omitemptyelem"`
//
// // Field will be marshaled as an AttributeValue string,
// // only valid for number types (int,uint,float)
// Field int `dynamodbav:",string"`
//
// // Field will be marshaled as a binary set
// Field [][]byte `dynamodbav:",binaryset"`
//
// // Field will be marshaled as a number set
// Field []int `dynamodbav:",numberset"`
//
// // Field will be marshaled as a string set
// Field []string `dynamodbav:",stringset"`
//
// // Field will be marshaled as Unix time number in seconds.
// // This tag is only valid with time.Time typed struct fields.
// // Important to note that zero value time as unixtime is not 0 seconds
// // from January 1, 1970 UTC, but -62135596800, which is the number of
// // seconds between January 1, 0001 UTC, and January 1, 1970 UTC.
// Field time.Time `dynamodbav:",unixtime"`
//
// The omitempty tag is only used during Marshaling and is ignored for
// Unmarshal. When omitempty is set, any zero value, or any value whose
// marshaling results in an AttributeValue NULL, will be omitted from the
// AttributeValue Map during struct marshal. The omitemptyelem tag works the
// same as omitempty except it applies to the elements of maps and slices
// instead of struct fields; such elements will not be included in the
// marshaled AttributeValue Map, List, or Set.
//
// For convenience and backwards compatibility with the ConvertTo functions,
// json struct tags are supported by Marshal and Unmarshal. If
// both json and dynamodbav struct tags are provided, the json tag will
// be ignored in favor of dynamodbav.
//
// All struct fields, including anonymous fields, are marshaled unless
// any of the following conditions are met.
//
// - the field is not exported
// - json or dynamodbav field tag is "-"
// - json or dynamodbav field tag specifies "omitempty", and is empty.
//
// Pointer and interface values encode as the value pointed to or contained
// in the interface. A nil value encodes as the AttributeValue NULL value.
//
// Channel, complex, and function values are not encoded and will be skipped
// when walking the value to be marshaled.
//
// Any error that occurs when marshaling will halt the marshal and return
// the error.
//
// Marshal cannot represent cyclic data structures and will not handle them.
// Passing cyclic structures to Marshal will result in an infinite recursion.
func Marshal(in interface{}) (*dynamodb.AttributeValue, error) {
return NewEncoder().Encode(in)
}
// MarshalMap is an alias for the Marshal func which marshals a Go value
// type to a map of AttributeValues.
//
// This is useful for DynamoDB APIs such as PutItem.
func MarshalMap(in interface{}) (map[string]*dynamodb.AttributeValue, error) {
av, err := NewEncoder().Encode(in)
if err != nil || av == nil || av.M == nil {
return map[string]*dynamodb.AttributeValue{}, err
}
return av.M, nil
}
// MarshalList is an alias for the Marshal func which marshals a Go value
// type to a slice of AttributeValues.
func MarshalList(in interface{}) ([]*dynamodb.AttributeValue, error) {
av, err := NewEncoder().Encode(in)
if err != nil || av == nil || av.L == nil {
return []*dynamodb.AttributeValue{}, err
}
return av.L, nil
}
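// Illustrative usage (an editorial sketch, not part of the vendored source):
//
//	avs, err := MarshalList([]string{"a", "b"})
//	// On success avs is:
//	// []*dynamodb.AttributeValue{{S: aws.String("a")}, {S: aws.String("b")}}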
// A MarshalOptions is a collection of options shared between marshaling
// and unmarshaling.
type MarshalOptions struct {
// States that the encoding/json struct tags should be supported.
// If a `dynamodbav` struct tag is also provided, the encoding/json
// tag will be ignored.
//
// Enabled by default.
SupportJSONTags bool
}
// An Encoder provides marshaling of Go value types to AttributeValues.
type Encoder struct {
MarshalOptions
// Empty strings, "", will be marked as NULL AttributeValue types.
// Empty strings are not valid values for DynamoDB. Will not apply
// to lists, sets, or maps. Use the struct tag `omitemptyelem`
// to skip empty (zero) values in lists, sets and maps.
//
// Enabled by default.
NullEmptyString bool
}
// NewEncoder creates a new Encoder with default configuration. Use
// the `opts` functional options to override the default configuration.
func NewEncoder(opts ...func(*Encoder)) *Encoder {
e := &Encoder{
MarshalOptions: MarshalOptions{
SupportJSONTags: true,
},
NullEmptyString: true,
}
for _, o := range opts {
o(e)
}
return e
}
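// Illustrative usage of the functional options (an editorial sketch, not part
// of the vendored source):
//
//	e := NewEncoder(func(e *Encoder) {
//		// Keep empty strings as-is instead of encoding them as NULL.
//		e.NullEmptyString = false
//	})
//	av, err := e.Encode(record) // record is a placeholder value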
// Encode will marshal a Go value type to an AttributeValue, returning
// the constructed AttributeValue or an error.
func (e *Encoder) Encode(in interface{}) (*dynamodb.AttributeValue, error) {
av := &dynamodb.AttributeValue{}
if err := e.encode(av, reflect.ValueOf(in), tag{}); err != nil {
return nil, err
}
return av, nil
}
func fieldByIndex(v reflect.Value, index []int,
OnEmbeddedNilStruct func(*reflect.Value) bool) reflect.Value {
fv := v
for i, x := range index {
if i > 0 {
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
if fv.IsNil() && !OnEmbeddedNilStruct(&fv) {
break
}
fv = fv.Elem()
}
}
fv = fv.Field(x)
}
return fv
}
func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
// We should check for omitted values first before dereferencing.
if fieldTag.OmitEmpty && emptyValue(v) {
encodeNull(av)
return nil
}
// Handle both pointers and interface conversion into types
v = valueElem(v)
if v.Kind() != reflect.Invalid {
if used, err := tryMarshaler(av, v); used {
return err
}
}
switch v.Kind() {
case reflect.Invalid:
encodeNull(av)
case reflect.Struct:
return e.encodeStruct(av, v, fieldTag)
case reflect.Map:
return e.encodeMap(av, v, fieldTag)
case reflect.Slice, reflect.Array:
return e.encodeSlice(av, v, fieldTag)
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
// do nothing for unsupported types
default:
return e.encodeScalar(av, v, fieldTag)
}
return nil
}
func (e *Encoder) encodeStruct(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
// To maintain backwards compatibility with the ConvertTo family of methods,
// which converted time.Time structs to strings
if v.Type().ConvertibleTo(timeType) {
t := v.Convert(timeType).Interface().(time.Time)
if fieldTag.AsUnixTime {
return UnixTime(t).MarshalDynamoDBAttributeValue(av)
}
s := t.Format(time.RFC3339Nano)
av.S = &s
return nil
}
av.M = map[string]*dynamodb.AttributeValue{}
fields := unionStructFields(v.Type(), e.MarshalOptions)
for _, f := range fields {
if f.Name == "" {
return &InvalidMarshalError{msg: "map key cannot be empty"}
}
found := true
fv := fieldByIndex(v, f.Index, func(v *reflect.Value) bool {
found = false
return false // to break the loop.
})
if !found {
continue
}
elem := &dynamodb.AttributeValue{}
err := e.encode(elem, fv, f.tag)
if err != nil {
return err
}
skip, err := keepOrOmitEmpty(f.OmitEmpty, elem, err)
if err != nil {
return err
} else if skip {
continue
}
av.M[f.Name] = elem
}
if len(av.M) == 0 {
encodeNull(av)
}
return nil
}
func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
av.M = map[string]*dynamodb.AttributeValue{}
for _, key := range v.MapKeys() {
keyName := fmt.Sprint(key.Interface())
if keyName == "" {
return &InvalidMarshalError{msg: "map key cannot be empty"}
}
elemVal := v.MapIndex(key)
elem := &dynamodb.AttributeValue{}
err := e.encode(elem, elemVal, tag{})
skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, elem, err)
if err != nil {
return err
} else if skip {
continue
}
av.M[keyName] = elem
}
if len(av.M) == 0 {
encodeNull(av)
}
return nil
}
func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
switch v.Type().Elem().Kind() {
case reflect.Uint8:
slice := reflect.MakeSlice(byteSliceType, v.Len(), v.Len())
reflect.Copy(slice, v)
b := slice.Bytes()
if len(b) == 0 {
encodeNull(av)
return nil
}
av.B = append([]byte{}, b...)
default:
var elemFn func(dynamodb.AttributeValue) error
if fieldTag.AsBinSet || v.Type() == byteSliceSlicetype { // Binary Set
av.BS = make([][]byte, 0, v.Len())
elemFn = func(elem dynamodb.AttributeValue) error {
if elem.B == nil {
return &InvalidMarshalError{msg: "binary set must only contain non-nil byte slices"}
}
av.BS = append(av.BS, elem.B)
return nil
}
} else if fieldTag.AsNumSet { // Number Set
av.NS = make([]*string, 0, v.Len())
elemFn = func(elem dynamodb.AttributeValue) error {
if elem.N == nil {
return &InvalidMarshalError{msg: "number set must only contain non-nil string numbers"}
}
av.NS = append(av.NS, elem.N)
return nil
}
} else if fieldTag.AsStrSet { // String Set
av.SS = make([]*string, 0, v.Len())
elemFn = func(elem dynamodb.AttributeValue) error {
if elem.S == nil {
return &InvalidMarshalError{msg: "string set must only contain non-nil strings"}
}
av.SS = append(av.SS, elem.S)
return nil
}
} else { // List
av.L = make([]*dynamodb.AttributeValue, 0, v.Len())
elemFn = func(elem dynamodb.AttributeValue) error {
av.L = append(av.L, &elem)
return nil
}
}
if n, err := e.encodeList(v, fieldTag, elemFn); err != nil {
return err
} else if n == 0 {
encodeNull(av)
}
}
return nil
}
func (e *Encoder) encodeList(v reflect.Value, fieldTag tag, elemFn func(dynamodb.AttributeValue) error) (int, error) {
count := 0
for i := 0; i < v.Len(); i++ {
elem := dynamodb.AttributeValue{}
err := e.encode(&elem, v.Index(i), tag{OmitEmpty: fieldTag.OmitEmptyElem})
skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, &elem, err)
if err != nil {
return 0, err
} else if skip {
continue
}
if err := elemFn(elem); err != nil {
return 0, err
}
count++
}
return count, nil
}
func (e *Encoder) encodeScalar(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
if v.Type() == numberType {
s := v.String()
if fieldTag.AsString {
av.S = &s
} else {
av.N = &s
}
return nil
}
switch v.Kind() {
case reflect.Bool:
av.BOOL = new(bool)
*av.BOOL = v.Bool()
case reflect.String:
if err := e.encodeString(av, v); err != nil {
return err
}
default:
// Fallback to encoding numbers, will return invalid type if not supported
if err := e.encodeNumber(av, v); err != nil {
return err
}
if fieldTag.AsString && av.NULL == nil && av.N != nil {
av.S = av.N
av.N = nil
}
}
return nil
}
func (e *Encoder) encodeNumber(av *dynamodb.AttributeValue, v reflect.Value) error {
if used, err := tryMarshaler(av, v); used {
return err
}
var out string
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out = encodeInt(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
out = encodeUint(v.Uint())
case reflect.Float32, reflect.Float64:
out = encodeFloat(v.Float())
default:
return &unsupportedMarshalTypeError{Type: v.Type()}
}
av.N = &out
return nil
}
func (e *Encoder) encodeString(av *dynamodb.AttributeValue, v reflect.Value) error {
if used, err := tryMarshaler(av, v); used {
return err
}
switch v.Kind() {
case reflect.String:
s := v.String()
if len(s) == 0 && e.NullEmptyString {
encodeNull(av)
} else {
av.S = &s
}
default:
return &unsupportedMarshalTypeError{Type: v.Type()}
}
return nil
}
func encodeInt(i int64) string {
return strconv.FormatInt(i, 10)
}
func encodeUint(u uint64) string {
return strconv.FormatUint(u, 10)
}
func encodeFloat(f float64) string {
return strconv.FormatFloat(f, 'f', -1, 64)
}
func encodeNull(av *dynamodb.AttributeValue) {
t := true
*av = dynamodb.AttributeValue{NULL: &t}
}
func valueElem(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Interface, reflect.Ptr:
for v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr {
v = v.Elem()
}
}
return v
}
func emptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func tryMarshaler(av *dynamodb.AttributeValue, v reflect.Value) (bool, error) {
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
if v.Type().NumMethod() == 0 {
return false, nil
}
if m, ok := v.Interface().(Marshaler); ok {
return true, m.MarshalDynamoDBAttributeValue(av)
}
return false, nil
}
func keepOrOmitEmpty(omitEmpty bool, av *dynamodb.AttributeValue, err error) (bool, error) {
if err != nil {
if _, ok := err.(*unsupportedMarshalTypeError); ok {
return true, nil
}
return false, err
}
if av.NULL != nil && omitEmpty {
return true, nil
}
return false, nil
}
// An InvalidMarshalError is an error type representing an error
// occurring when marshaling a Go value type to an AttributeValue.
type InvalidMarshalError struct {
emptyOrigError
msg string
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *InvalidMarshalError) Error() string {
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *InvalidMarshalError) Code() string {
return "InvalidMarshalError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *InvalidMarshalError) Message() string {
return e.msg
}
// An unsupportedMarshalTypeError represents a Go value type
// which cannot be marshaled into an AttributeValue and should
// be skipped by the marshaler.
type unsupportedMarshalTypeError struct {
emptyOrigError
Type reflect.Type
}
// Error returns the string representation of the error,
// satisfying the error interface.
func (e *unsupportedMarshalTypeError) Error() string {
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
}
// Code returns the code of the error, satisfying the awserr.Error
// interface.
func (e *unsupportedMarshalTypeError) Code() string {
return "unsupportedMarshalTypeError"
}
// Message returns the detailed message of the error, satisfying
// the awserr.Error interface.
func (e *unsupportedMarshalTypeError) Message() string {
return "Go value type " + e.Type.String() + " is not supported"
}

View file

@ -1,269 +0,0 @@
package dynamodbattribute
import (
"reflect"
"sort"
"strings"
)
type field struct {
tag
Name string
NameFromTag bool
Index []int
Type reflect.Type
}
func fieldByName(fields []field, name string) (field, bool) {
foldExists := false
foldField := field{}
for _, f := range fields {
if f.Name == name {
return f, true
}
if !foldExists && strings.EqualFold(f.Name, name) {
foldField = f
foldExists = true
}
}
return foldField, foldExists
}
func buildField(pIdx []int, i int, sf reflect.StructField, fieldTag tag) field {
f := field{
Name: sf.Name,
Type: sf.Type,
tag: fieldTag,
}
if len(fieldTag.Name) != 0 {
f.NameFromTag = true
f.Name = fieldTag.Name
}
f.Index = make([]int, len(pIdx)+1)
copy(f.Index, pIdx)
f.Index[len(pIdx)] = i
return f
}
func unionStructFields(t reflect.Type, opts MarshalOptions) []field {
fields := enumFields(t, opts)
sort.Sort(fieldsByName(fields))
fields = visibleFields(fields)
return fields
}
// enumFields will recursively iterate through a structure and its nested
// anonymous fields.
//
// Based on the encoding/json struct field enumeration of the Go Stdlib
// https://golang.org/src/encoding/json/encode.go typeField func.
func enumFields(t reflect.Type, opts MarshalOptions) []field {
// Fields to explore
current := []field{}
next := []field{{Type: t}}
// count of queued names
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
visited := map[reflect.Type]struct{}{}
fields := []field{}
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if _, ok := visited[f.Type]; ok {
continue
}
visited[f.Type] = struct{}{}
for i := 0; i < f.Type.NumField(); i++ {
sf := f.Type.Field(i)
if sf.PkgPath != "" && !sf.Anonymous {
// Ignore unexported and non-anonymous fields.
// An unexported but anonymous field may still be used if
// the type has exported nested fields.
continue
}
fieldTag := tag{}
fieldTag.parseAVTag(sf.Tag)
if opts.SupportJSONTags && fieldTag == (tag{}) {
fieldTag.parseJSONTag(sf.Tag)
}
if fieldTag.Ignore {
continue
}
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
ft = ft.Elem()
}
structField := buildField(f.Index, i, sf, fieldTag)
structField.Type = ft
if !sf.Anonymous || ft.Kind() != reflect.Struct {
fields = append(fields, structField)
if count[f.Type] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, structField)
}
continue
}
// Record new anon struct to explore next round
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, structField)
}
}
}
}
return fields
}
// visibleFields will return a slice of fields which are visible based on
// Go's standard visibility rules, with the exception of ties being broken
// by depth and struct tag naming.
//
// Based on the encoding/json field filtering of the Go Stdlib
// https://golang.org/src/encoding/json/encode.go typeField func.
func visibleFields(fields []field) []field {
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.Name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.Name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(fieldsByIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
//
// Based on the encoding/json field filtering of the Go Stdlib
// https://golang.org/src/encoding/json/encode.go dominantField func.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].Index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.Index) > length {
fields = fields[:i]
break
}
if f.NameFromTag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
// fieldsByName sorts fields by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
//
// Based on the encoding/json field filtering of the Go Stdlib
// https://golang.org/src/encoding/json/encode.go fieldsByName type.
type fieldsByName []field
func (x fieldsByName) Len() int { return len(x) }
func (x fieldsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x fieldsByName) Less(i, j int) bool {
if x[i].Name != x[j].Name {
return x[i].Name < x[j].Name
}
if len(x[i].Index) != len(x[j].Index) {
return len(x[i].Index) < len(x[j].Index)
}
if x[i].NameFromTag != x[j].NameFromTag {
return x[i].NameFromTag
}
return fieldsByIndex(x).Less(i, j)
}
// fieldsByIndex sorts fields by index sequence.
//
// Based on the encoding/json field filtering of the Go Stdlib
// https://golang.org/src/encoding/json/encode.go fieldsByIndex type.
type fieldsByIndex []field
func (x fieldsByIndex) Len() int { return len(x) }
func (x fieldsByIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x fieldsByIndex) Less(i, j int) bool {
for k, xik := range x[i].Index {
if k >= len(x[j].Index) {
return false
}
if xik != x[j].Index[k] {
return xik < x[j].Index[k]
}
}
return len(x[i].Index) < len(x[j].Index)
}

View file

@ -1,68 +0,0 @@
package dynamodbattribute
import (
"reflect"
"strings"
)
type tag struct {
Name string
Ignore bool
OmitEmpty bool
OmitEmptyElem bool
AsString bool
AsBinSet, AsNumSet, AsStrSet bool
AsUnixTime bool
}
func (t *tag) parseAVTag(structTag reflect.StructTag) {
tagStr := structTag.Get("dynamodbav")
if len(tagStr) == 0 {
return
}
t.parseTagStr(tagStr)
}
func (t *tag) parseJSONTag(structTag reflect.StructTag) {
tagStr := structTag.Get("json")
if len(tagStr) == 0 {
return
}
t.parseTagStr(tagStr)
}
func (t *tag) parseTagStr(tagStr string) {
parts := strings.Split(tagStr, ",")
if len(parts) == 0 {
return
}
if name := parts[0]; name == "-" {
t.Name = ""
t.Ignore = true
} else {
t.Name = name
t.Ignore = false
}
for _, opt := range parts[1:] {
switch opt {
case "omitempty":
t.OmitEmpty = true
case "omitemptyelem":
t.OmitEmptyElem = true
case "string":
t.AsString = true
case "binaryset":
t.AsBinSet = true
case "numberset":
t.AsNumSet = true
case "stringset":
t.AsStrSet = true
case "unixtime":
t.AsUnixTime = true
}
}
}
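// For illustration (an editorial note, not part of the vendored source),
// given the struct tag
//
//	Field int `dynamodbav:"myName,omitempty,string"`
//
// parseAVTag produces tag{Name: "myName", OmitEmpty: true, AsString: true},
// while `dynamodbav:"-"` produces tag{Ignore: true}.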

View file

@ -1,214 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package dynamodbiface provides an interface to enable mocking the Amazon DynamoDB service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package dynamodbiface
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// DynamoDBAPI provides an interface to enable mocking the
// dynamodb.DynamoDB service client's API operations,
// paginators, and waiters. This makes unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // Amazon DynamoDB.
// func myFunc(svc dynamodbiface.DynamoDBAPI) bool {
// // Make svc.BatchGetItem request
// }
//
// func main() {
// sess := session.New()
// svc := dynamodb.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockDynamoDBClient struct {
// dynamodbiface.DynamoDBAPI
// }
// func (m *mockDynamoDBClient) BatchGetItem(input *dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockDynamoDBClient{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or to use
// tooling to generate mocks to satisfy the interfaces.
type DynamoDBAPI interface {
BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error)
BatchGetItemWithContext(aws.Context, *dynamodb.BatchGetItemInput, ...request.Option) (*dynamodb.BatchGetItemOutput, error)
BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput)
BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error
BatchGetItemPagesWithContext(aws.Context, *dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool, ...request.Option) error
BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error)
BatchWriteItemWithContext(aws.Context, *dynamodb.BatchWriteItemInput, ...request.Option) (*dynamodb.BatchWriteItemOutput, error)
BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput)
CreateBackup(*dynamodb.CreateBackupInput) (*dynamodb.CreateBackupOutput, error)
CreateBackupWithContext(aws.Context, *dynamodb.CreateBackupInput, ...request.Option) (*dynamodb.CreateBackupOutput, error)
CreateBackupRequest(*dynamodb.CreateBackupInput) (*request.Request, *dynamodb.CreateBackupOutput)
CreateGlobalTable(*dynamodb.CreateGlobalTableInput) (*dynamodb.CreateGlobalTableOutput, error)
CreateGlobalTableWithContext(aws.Context, *dynamodb.CreateGlobalTableInput, ...request.Option) (*dynamodb.CreateGlobalTableOutput, error)
CreateGlobalTableRequest(*dynamodb.CreateGlobalTableInput) (*request.Request, *dynamodb.CreateGlobalTableOutput)
CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error)
CreateTableWithContext(aws.Context, *dynamodb.CreateTableInput, ...request.Option) (*dynamodb.CreateTableOutput, error)
CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput)
DeleteBackup(*dynamodb.DeleteBackupInput) (*dynamodb.DeleteBackupOutput, error)
DeleteBackupWithContext(aws.Context, *dynamodb.DeleteBackupInput, ...request.Option) (*dynamodb.DeleteBackupOutput, error)
DeleteBackupRequest(*dynamodb.DeleteBackupInput) (*request.Request, *dynamodb.DeleteBackupOutput)
DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error)
DeleteItemWithContext(aws.Context, *dynamodb.DeleteItemInput, ...request.Option) (*dynamodb.DeleteItemOutput, error)
DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput)
DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error)
DeleteTableWithContext(aws.Context, *dynamodb.DeleteTableInput, ...request.Option) (*dynamodb.DeleteTableOutput, error)
DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput)
DescribeBackup(*dynamodb.DescribeBackupInput) (*dynamodb.DescribeBackupOutput, error)
DescribeBackupWithContext(aws.Context, *dynamodb.DescribeBackupInput, ...request.Option) (*dynamodb.DescribeBackupOutput, error)
DescribeBackupRequest(*dynamodb.DescribeBackupInput) (*request.Request, *dynamodb.DescribeBackupOutput)
DescribeContinuousBackups(*dynamodb.DescribeContinuousBackupsInput) (*dynamodb.DescribeContinuousBackupsOutput, error)
DescribeContinuousBackupsWithContext(aws.Context, *dynamodb.DescribeContinuousBackupsInput, ...request.Option) (*dynamodb.DescribeContinuousBackupsOutput, error)
DescribeContinuousBackupsRequest(*dynamodb.DescribeContinuousBackupsInput) (*request.Request, *dynamodb.DescribeContinuousBackupsOutput)
DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error)
DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error)
DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, *dynamodb.DescribeGlobalTableOutput)
DescribeGlobalTableSettings(*dynamodb.DescribeGlobalTableSettingsInput) (*dynamodb.DescribeGlobalTableSettingsOutput, error)
DescribeGlobalTableSettingsWithContext(aws.Context, *dynamodb.DescribeGlobalTableSettingsInput, ...request.Option) (*dynamodb.DescribeGlobalTableSettingsOutput, error)
DescribeGlobalTableSettingsRequest(*dynamodb.DescribeGlobalTableSettingsInput) (*request.Request, *dynamodb.DescribeGlobalTableSettingsOutput)
DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error)
DescribeLimitsWithContext(aws.Context, *dynamodb.DescribeLimitsInput, ...request.Option) (*dynamodb.DescribeLimitsOutput, error)
DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput)
DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error)
DescribeTableWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.Option) (*dynamodb.DescribeTableOutput, error)
DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput)
DescribeTimeToLive(*dynamodb.DescribeTimeToLiveInput) (*dynamodb.DescribeTimeToLiveOutput, error)
DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error)
DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput)
GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error)
GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error)
GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput)
ListBackups(*dynamodb.ListBackupsInput) (*dynamodb.ListBackupsOutput, error)
ListBackupsWithContext(aws.Context, *dynamodb.ListBackupsInput, ...request.Option) (*dynamodb.ListBackupsOutput, error)
ListBackupsRequest(*dynamodb.ListBackupsInput) (*request.Request, *dynamodb.ListBackupsOutput)
ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error)
ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error)
ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput)
ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error)
ListTablesWithContext(aws.Context, *dynamodb.ListTablesInput, ...request.Option) (*dynamodb.ListTablesOutput, error)
ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput)
ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error
ListTablesPagesWithContext(aws.Context, *dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool, ...request.Option) error
ListTagsOfResource(*dynamodb.ListTagsOfResourceInput) (*dynamodb.ListTagsOfResourceOutput, error)
ListTagsOfResourceWithContext(aws.Context, *dynamodb.ListTagsOfResourceInput, ...request.Option) (*dynamodb.ListTagsOfResourceOutput, error)
ListTagsOfResourceRequest(*dynamodb.ListTagsOfResourceInput) (*request.Request, *dynamodb.ListTagsOfResourceOutput)
PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error)
PutItemWithContext(aws.Context, *dynamodb.PutItemInput, ...request.Option) (*dynamodb.PutItemOutput, error)
PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput)
Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error)
QueryWithContext(aws.Context, *dynamodb.QueryInput, ...request.Option) (*dynamodb.QueryOutput, error)
QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput)
QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error
QueryPagesWithContext(aws.Context, *dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool, ...request.Option) error
RestoreTableFromBackup(*dynamodb.RestoreTableFromBackupInput) (*dynamodb.RestoreTableFromBackupOutput, error)
RestoreTableFromBackupWithContext(aws.Context, *dynamodb.RestoreTableFromBackupInput, ...request.Option) (*dynamodb.RestoreTableFromBackupOutput, error)
RestoreTableFromBackupRequest(*dynamodb.RestoreTableFromBackupInput) (*request.Request, *dynamodb.RestoreTableFromBackupOutput)
RestoreTableToPointInTime(*dynamodb.RestoreTableToPointInTimeInput) (*dynamodb.RestoreTableToPointInTimeOutput, error)
RestoreTableToPointInTimeWithContext(aws.Context, *dynamodb.RestoreTableToPointInTimeInput, ...request.Option) (*dynamodb.RestoreTableToPointInTimeOutput, error)
RestoreTableToPointInTimeRequest(*dynamodb.RestoreTableToPointInTimeInput) (*request.Request, *dynamodb.RestoreTableToPointInTimeOutput)
Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error)
ScanWithContext(aws.Context, *dynamodb.ScanInput, ...request.Option) (*dynamodb.ScanOutput, error)
ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput)
ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error
ScanPagesWithContext(aws.Context, *dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool, ...request.Option) error
TagResource(*dynamodb.TagResourceInput) (*dynamodb.TagResourceOutput, error)
TagResourceWithContext(aws.Context, *dynamodb.TagResourceInput, ...request.Option) (*dynamodb.TagResourceOutput, error)
TagResourceRequest(*dynamodb.TagResourceInput) (*request.Request, *dynamodb.TagResourceOutput)
UntagResource(*dynamodb.UntagResourceInput) (*dynamodb.UntagResourceOutput, error)
UntagResourceWithContext(aws.Context, *dynamodb.UntagResourceInput, ...request.Option) (*dynamodb.UntagResourceOutput, error)
UntagResourceRequest(*dynamodb.UntagResourceInput) (*request.Request, *dynamodb.UntagResourceOutput)
UpdateContinuousBackups(*dynamodb.UpdateContinuousBackupsInput) (*dynamodb.UpdateContinuousBackupsOutput, error)
UpdateContinuousBackupsWithContext(aws.Context, *dynamodb.UpdateContinuousBackupsInput, ...request.Option) (*dynamodb.UpdateContinuousBackupsOutput, error)
UpdateContinuousBackupsRequest(*dynamodb.UpdateContinuousBackupsInput) (*request.Request, *dynamodb.UpdateContinuousBackupsOutput)
UpdateGlobalTable(*dynamodb.UpdateGlobalTableInput) (*dynamodb.UpdateGlobalTableOutput, error)
UpdateGlobalTableWithContext(aws.Context, *dynamodb.UpdateGlobalTableInput, ...request.Option) (*dynamodb.UpdateGlobalTableOutput, error)
UpdateGlobalTableRequest(*dynamodb.UpdateGlobalTableInput) (*request.Request, *dynamodb.UpdateGlobalTableOutput)
UpdateGlobalTableSettings(*dynamodb.UpdateGlobalTableSettingsInput) (*dynamodb.UpdateGlobalTableSettingsOutput, error)
UpdateGlobalTableSettingsWithContext(aws.Context, *dynamodb.UpdateGlobalTableSettingsInput, ...request.Option) (*dynamodb.UpdateGlobalTableSettingsOutput, error)
UpdateGlobalTableSettingsRequest(*dynamodb.UpdateGlobalTableSettingsInput) (*request.Request, *dynamodb.UpdateGlobalTableSettingsOutput)
UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error)
UpdateItemWithContext(aws.Context, *dynamodb.UpdateItemInput, ...request.Option) (*dynamodb.UpdateItemOutput, error)
UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput)
UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error)
UpdateTableWithContext(aws.Context, *dynamodb.UpdateTableInput, ...request.Option) (*dynamodb.UpdateTableOutput, error)
UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput)
UpdateTimeToLive(*dynamodb.UpdateTimeToLiveInput) (*dynamodb.UpdateTimeToLiveOutput, error)
UpdateTimeToLiveWithContext(aws.Context, *dynamodb.UpdateTimeToLiveInput, ...request.Option) (*dynamodb.UpdateTimeToLiveOutput, error)
UpdateTimeToLiveRequest(*dynamodb.UpdateTimeToLiveInput) (*request.Request, *dynamodb.UpdateTimeToLiveOutput)
WaitUntilTableExists(*dynamodb.DescribeTableInput) error
WaitUntilTableExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error
WaitUntilTableNotExists(*dynamodb.DescribeTableInput) error
WaitUntilTableNotExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error
}
var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil)
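// The blank-identifier assignment above is a compile-time assertion that the
// concrete client satisfies DynamoDBAPI. Test code can apply the same idiom
// to its mocks (an editorial sketch, not part of the generated source):
//
//	var _ DynamoDBAPI = (*mockDynamoDBClient)(nil)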

View file

@ -1,149 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package dynamodb
const (
// ErrCodeBackupInUseException for service response error code
// "BackupInUseException".
//
// There is another ongoing conflicting backup control plane operation on the
// table. The backup is either being created, deleted, or restored to a table.
ErrCodeBackupInUseException = "BackupInUseException"
// ErrCodeBackupNotFoundException for service response error code
// "BackupNotFoundException".
//
// Backup not found for the given BackupARN.
ErrCodeBackupNotFoundException = "BackupNotFoundException"
// ErrCodeConditionalCheckFailedException for service response error code
// "ConditionalCheckFailedException".
//
// A condition specified in the operation could not be evaluated.
ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException"
// ErrCodeContinuousBackupsUnavailableException for service response error code
// "ContinuousBackupsUnavailableException".
//
// Backups have not yet been enabled for this table.
ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException"
// ErrCodeGlobalTableAlreadyExistsException for service response error code
// "GlobalTableAlreadyExistsException".
//
// The specified global table already exists.
ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException"
// ErrCodeGlobalTableNotFoundException for service response error code
// "GlobalTableNotFoundException".
//
// The specified global table does not exist.
ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException"
// ErrCodeIndexNotFoundException for service response error code
// "IndexNotFoundException".
//
// The operation tried to access a nonexistent index.
ErrCodeIndexNotFoundException = "IndexNotFoundException"
// ErrCodeInternalServerError for service response error code
// "InternalServerError".
//
// An error occurred on the server side.
ErrCodeInternalServerError = "InternalServerError"
// ErrCodeInvalidRestoreTimeException for service response error code
// "InvalidRestoreTimeException".
//
// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
// and LatestRestorableDateTime.
ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException"
// ErrCodeItemCollectionSizeLimitExceededException for service response error code
// "ItemCollectionSizeLimitExceededException".
//
// An item collection is too large. This exception is only returned for tables
// that have one or more local secondary indexes.
ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException"
// ErrCodeLimitExceededException for service response error code
// "LimitExceededException".
//
// Up to 50 CreateBackup operations are allowed per second, per account. There
// is no limit to the number of daily on-demand backups that can be taken.
//
// Up to 10 simultaneous table operations are allowed per account. These operations
// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
// and RestoreTableToPointInTime.
//
// For tables with secondary indexes, only one of those tables can be in the
// CREATING state at any point in time. Do not attempt to create more than one
// such table simultaneously.
//
// The total limit of tables in the ACTIVE state is 250.
ErrCodeLimitExceededException = "LimitExceededException"
// ErrCodePointInTimeRecoveryUnavailableException for service response error code
// "PointInTimeRecoveryUnavailableException".
//
// Point in time recovery has not yet been enabled for this source table.
ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException"
// ErrCodeProvisionedThroughputExceededException for service response error code
// "ProvisionedThroughputExceededException".
//
// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
// requests that receive this exception. Your request is eventually successful,
// unless your retry queue is too large to finish. Reduce the frequency of requests
// and use exponential backoff. For more information, go to Error Retries and
// Exponential Backoff (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
// in the Amazon DynamoDB Developer Guide.
ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException"
// ErrCodeReplicaAlreadyExistsException for service response error code
// "ReplicaAlreadyExistsException".
//
// The specified replica is already part of the global table.
ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException"
// ErrCodeReplicaNotFoundException for service response error code
// "ReplicaNotFoundException".
//
// The specified replica is no longer part of the global table.
ErrCodeReplicaNotFoundException = "ReplicaNotFoundException"
// ErrCodeResourceInUseException for service response error code
// "ResourceInUseException".
//
// The operation conflicts with the resource's availability. For example, you
// attempted to recreate an existing table, or tried to delete a table currently
// in the CREATING state.
ErrCodeResourceInUseException = "ResourceInUseException"
// ErrCodeResourceNotFoundException for service response error code
// "ResourceNotFoundException".
//
// The operation tried to access a nonexistent table or index. The resource
// might not be specified correctly, or its status might not be ACTIVE.
ErrCodeResourceNotFoundException = "ResourceNotFoundException"
// ErrCodeTableAlreadyExistsException for service response error code
// "TableAlreadyExistsException".
//
// A target table with the specified name already exists.
ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException"
// ErrCodeTableInUseException for service response error code
// "TableInUseException".
//
// A target table with the specified name is either being created or deleted.
ErrCodeTableInUseException = "TableInUseException"
// ErrCodeTableNotFoundException for service response error code
// "TableNotFoundException".
//
// A source table with the name TableName does not currently exist within the
// subscriber's account.
ErrCodeTableNotFoundException = "TableNotFoundException"
)

View file

@ -1,95 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package dynamodb
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
// DynamoDB provides the API operation methods for making requests to
// Amazon DynamoDB. See this package's package overview docs
// for details on the service.
//
// DynamoDB methods are safe to use concurrently. It is not safe to
// mutate any of the struct's properties though.
type DynamoDB struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "dynamodb" // Service endpoint prefix API calls made to.
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)
// New creates a new instance of the DynamoDB client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a DynamoDB client from just a session.
// svc := dynamodb.New(mySession)
//
// // Create a DynamoDB client with additional configuration
// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DynamoDB {
svc := &DynamoDB{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2012-08-10",
JSONVersion: "1.0",
TargetPrefix: "DynamoDB_20120810",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a DynamoDB operation and runs any
// custom request initialization.
func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}

@ -1,107 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package dynamodb
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// WaitUntilTableExists uses the DynamoDB API operation
// DescribeTable to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error {
return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilTableExists",
MaxAttempts: 25,
Delay: request.ConstantWaiterDelay(20 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.PathWaiterMatch, Argument: "Table.TableStatus",
Expected: "ACTIVE",
},
{
State: request.RetryWaiterState,
Matcher: request.ErrorWaiterMatch,
Expected: "ResourceNotFoundException",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeTableInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeTableRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilTableNotExists uses the DynamoDB API operation
// DescribeTable to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error {
return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilTableNotExists",
MaxAttempts: 25,
Delay: request.ConstantWaiterDelay(20 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.ErrorWaiterMatch,
Expected: "ResourceNotFoundException",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeTableInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeTableRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
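
A minimal sketch (not part of this commit) of driving this waiter with a bounded context; the table name "example-table" and the two-minute deadline are assumptions:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := dynamodb.New(sess)

	// Bound the total wait with a context deadline; the waiter polls
	// DescribeTable until the table reports ACTIVE, the attempts run out,
	// or the context expires.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	err := svc.WaitUntilTableExistsWithContext(ctx,
		&dynamodb.DescribeTableInput{TableName: aws.String("example-table")},
		request.WithWaiterMaxAttempts(10))
	if err != nil {
		log.Fatal(err)
	}
}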

File diff suppressed because it is too large

@ -1,120 +0,0 @@
package ec2
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
)
type retryer struct {
client.DefaultRetryer
}
func (d retryer) RetryRules(r *request.Request) time.Duration {
switch r.Operation.Name {
case opModifyNetworkInterfaceAttribute:
fallthrough
case opAssignPrivateIpAddresses:
return customRetryRule(r)
default:
return d.DefaultRetryer.RetryRules(r)
}
}
func customRetryRule(r *request.Request) time.Duration {
retryTimes := []time.Duration{
time.Second,
3 * time.Second,
5 * time.Second,
}
count := r.RetryCount
if count >= len(retryTimes) {
count = len(retryTimes) - 1
}
minTime := int(retryTimes[count])
return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime)
}
func setCustomRetryer(c *client.Client) {
maxRetries := aws.IntValue(c.Config.MaxRetries)
if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
}
c.Retryer = retryer{
DefaultRetryer: client.DefaultRetryer{
NumMaxRetries: maxRetries,
},
}
}
func init() {
initClient = func(c *client.Client) {
if c.Config.Retryer == nil {
// Only override the retryer with a custom one if the config
// does not already contain a retryer
setCustomRetryer(c)
}
}
initRequest = func(r *request.Request) {
if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter
r.Handlers.Build.PushFront(fillPresignedURL)
}
}
}
func fillPresignedURL(r *request.Request) {
if !r.ParamsFilled() {
return
}
origParams := r.Params.(*CopySnapshotInput)
// Stop if PresignedURL/DestinationRegion is set
if origParams.PresignedUrl != nil || origParams.DestinationRegion != nil {
return
}
origParams.DestinationRegion = r.Config.Region
newParams := awsutil.CopyOf(r.Params).(*CopySnapshotInput)
// Create a new request based on the existing request. We will use this to
// presign the CopySnapshot request against the source region.
cfg := r.Config.Copy(aws.NewConfig().
WithEndpoint("").
WithRegion(aws.StringValue(origParams.SourceRegion)))
clientInfo := r.ClientInfo
resolved, err := r.Config.EndpointResolver.EndpointFor(
clientInfo.ServiceName, aws.StringValue(cfg.Region),
func(opt *endpoints.Options) {
opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
},
)
if err != nil {
r.Error = err
return
}
clientInfo.Endpoint = resolved.URL
clientInfo.SigningRegion = resolved.SigningRegion
// Presign a CopySnapshot request with modified params
req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data)
url, err := req.Presign(5 * time.Minute) // 5 minutes should be enough.
if err != nil { // bubble error back up to original request
r.Error = err
return
}
// We have our URL, set it on params
origParams.PresignedUrl = &url
}
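
Because the init hook above only installs the custom retryer when the config has none, callers could opt out by supplying their own. A minimal sketch (not part of this commit), assuming the SDK's standard client.DefaultRetryer:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())

	// Supplying a Retryer here means initClient leaves it in place instead
	// of installing the EC2-specific retryer defined above.
	svc := ec2.New(sess, &aws.Config{
		Retryer: client.DefaultRetryer{NumMaxRetries: 5},
	})
	_ = svc
}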

@ -1,30 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package ec2 provides the client and types for making API
// requests to Amazon Elastic Compute Cloud.
//
// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
// in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware
// up front, so you can develop and deploy applications faster.
//
// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
//
// See ec2 package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/
//
// Using the Client
//
// To contact Amazon Elastic Compute Cloud with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon Elastic Compute Cloud client EC2 for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#New
package ec2
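
A minimal usage sketch (not part of this commit) following the doc comment above: create a session-backed client, then call an operation. DescribeInstances with an empty input simply lists what the credentials can see:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess) // safe for concurrent use

	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reservations:", len(out.Reservations))
}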

@ -1,3 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ec2

@ -1,93 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ec2
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/ec2query"
)
// EC2 provides the API operation methods for making requests to
// Amazon Elastic Compute Cloud. See this package's package overview docs
// for details on the service.
//
// EC2 methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type EC2 struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "ec2" // Service endpoint prefix API calls made to.
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)
// New creates a new instance of the EC2 client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create an EC2 client from just a session.
// svc := ec2.New(mySession)
//
// // Create an EC2 client with additional configuration
// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EC2 {
svc := &EC2{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2016-11-15",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(ec2query.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for an EC2 operation and runs any
// custom request initialization.
func (c *EC2) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,45 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package ecs provides the client and types for making API
// requests to Amazon EC2 Container Service.
//
// Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast,
// container management service that makes it easy to run, stop, and manage
// Docker containers on a cluster. You can host your cluster on a serverless
// infrastructure that is managed by Amazon ECS by launching your services or
// tasks using the Fargate launch type. For more control, you can host your
// tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) instances
// that you manage by using the EC2 launch type. For more information about
// launch types, see Amazon ECS Launch Types (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html).
//
// Amazon ECS lets you launch and stop container-based applications with simple
// API calls, allows you to get the state of your cluster from a centralized
// service, and gives you access to many familiar Amazon EC2 features.
//
// You can use Amazon ECS to schedule the placement of containers across your
// cluster based on your resource needs, isolation policies, and availability
// requirements. Amazon ECS eliminates the need for you to operate your own
// cluster management and configuration management systems or worry about scaling
// your management infrastructure.
//
// See https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13 for more information on this service.
//
// See ecs package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/ecs/
//
// Using the Client
//
// To contact Amazon EC2 Container Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon EC2 Container Service client ECS for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/ecs/#New
package ecs
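
As with the other service packages, usage follows the New-then-call pattern described above. A minimal sketch (not part of this commit):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ecs.New(sess)

	out, err := svc.ListClusters(&ecs.ListClustersInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("clusters:", len(out.ClusterArns))
}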

@ -1,145 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ecs
const (
// ErrCodeAccessDeniedException for service response error code
// "AccessDeniedException".
//
// You do not have authorization to perform the requested action.
ErrCodeAccessDeniedException = "AccessDeniedException"
// ErrCodeAttributeLimitExceededException for service response error code
// "AttributeLimitExceededException".
//
// You can apply up to 10 custom attributes per resource. You can view the attributes
// of a resource with ListAttributes. You can remove existing attributes on
// a resource with DeleteAttributes.
ErrCodeAttributeLimitExceededException = "AttributeLimitExceededException"
// ErrCodeBlockedException for service response error code
// "BlockedException".
//
// Your AWS account has been blocked. Contact AWS Support (http://aws.amazon.com/contact-us/)
// for more information.
ErrCodeBlockedException = "BlockedException"
// ErrCodeClientException for service response error code
// "ClientException".
//
// These errors are usually caused by a client action, such as using an action
// or resource on behalf of a user that doesn't have permissions to use the
// action or resource, or specifying an identifier that is not valid.
ErrCodeClientException = "ClientException"
// ErrCodeClusterContainsContainerInstancesException for service response error code
// "ClusterContainsContainerInstancesException".
//
// You cannot delete a cluster that has registered container instances. You
// must first deregister the container instances before you can delete the cluster.
// For more information, see DeregisterContainerInstance.
ErrCodeClusterContainsContainerInstancesException = "ClusterContainsContainerInstancesException"
// ErrCodeClusterContainsServicesException for service response error code
// "ClusterContainsServicesException".
//
// You cannot delete a cluster that contains services. You must first update
// the service to reduce its desired task count to 0 and then delete the service.
// For more information, see UpdateService and DeleteService.
ErrCodeClusterContainsServicesException = "ClusterContainsServicesException"
// ErrCodeClusterContainsTasksException for service response error code
// "ClusterContainsTasksException".
//
// You cannot delete a cluster that has active tasks.
ErrCodeClusterContainsTasksException = "ClusterContainsTasksException"
// ErrCodeClusterNotFoundException for service response error code
// "ClusterNotFoundException".
//
// The specified cluster could not be found. You can view your available clusters
// with ListClusters. Amazon ECS clusters are region-specific.
ErrCodeClusterNotFoundException = "ClusterNotFoundException"
// ErrCodeInvalidParameterException for service response error code
// "InvalidParameterException".
//
// The specified parameter is invalid. Review the available parameters for the
// API request.
ErrCodeInvalidParameterException = "InvalidParameterException"
// ErrCodeMissingVersionException for service response error code
// "MissingVersionException".
//
// Amazon ECS is unable to determine the current version of the Amazon ECS container
// agent on the container instance and does not have enough information to proceed
// with an update. This could be because the agent running on the container
// instance is an older or custom version that does not use our version information.
ErrCodeMissingVersionException = "MissingVersionException"
// ErrCodeNoUpdateAvailableException for service response error code
// "NoUpdateAvailableException".
//
// There is no update available for this Amazon ECS container agent. This could
// be because the agent is already running the latest version, or it is so old
// that there is no update path to the current version.
ErrCodeNoUpdateAvailableException = "NoUpdateAvailableException"
// ErrCodePlatformTaskDefinitionIncompatibilityException for service response error code
// "PlatformTaskDefinitionIncompatibilityException".
//
// The specified platform version does not satisfy the task definition's required
// capabilities.
ErrCodePlatformTaskDefinitionIncompatibilityException = "PlatformTaskDefinitionIncompatibilityException"
// ErrCodePlatformUnknownException for service response error code
// "PlatformUnknownException".
//
// The specified platform version does not exist.
ErrCodePlatformUnknownException = "PlatformUnknownException"
// ErrCodeServerException for service response error code
// "ServerException".
//
// These errors are usually caused by a server issue.
ErrCodeServerException = "ServerException"
// ErrCodeServiceNotActiveException for service response error code
// "ServiceNotActiveException".
//
// The specified service is not active. You can't update a service that is inactive.
// If you have previously deleted a service, you can re-create it with CreateService.
ErrCodeServiceNotActiveException = "ServiceNotActiveException"
// ErrCodeServiceNotFoundException for service response error code
// "ServiceNotFoundException".
//
// The specified service could not be found. You can view your available services
// with ListServices. Amazon ECS services are cluster-specific and region-specific.
ErrCodeServiceNotFoundException = "ServiceNotFoundException"
// ErrCodeTargetNotFoundException for service response error code
// "TargetNotFoundException".
//
// The specified target could not be found. You can view your available container
// instances with ListContainerInstances. Amazon ECS container instances are
// cluster-specific and region-specific.
ErrCodeTargetNotFoundException = "TargetNotFoundException"
// ErrCodeUnsupportedFeatureException for service response error code
// "UnsupportedFeatureException".
//
// The specified task is not supported in this region.
ErrCodeUnsupportedFeatureException = "UnsupportedFeatureException"
// ErrCodeUpdateInProgressException for service response error code
// "UpdateInProgressException".
//
// There is already a current Amazon ECS container agent update in progress
// on the specified container instance. If the container agent becomes disconnected
// while it is in a transitional stage, such as PENDING or STAGING, the update
// process can get stuck in that state. However, when the agent reconnects,
// it resumes where it stopped previously.
ErrCodeUpdateInProgressException = "UpdateInProgressException"
)
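
A minimal sketch (not part of this commit) of branching on these ECS error codes; the classify helper and the fabricated error in main are illustrative only:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// classify maps an ECS API error to a short label.
func classify(err error) string {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return "not an AWS error"
	}
	switch aerr.Code() {
	case ecs.ErrCodeClusterNotFoundException:
		return "cluster missing"
	case ecs.ErrCodeServiceNotFoundException:
		return "service missing"
	default:
		return aerr.Code()
	}
}

func main() {
	err := awserr.New(ecs.ErrCodeClusterNotFoundException, "no such cluster", nil)
	fmt.Println(classify(err)) // cluster missing
}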

@ -1,95 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ecs
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
// ECS provides the API operation methods for making requests to
// Amazon EC2 Container Service. See this package's package overview docs
// for details on the service.
//
// ECS methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type ECS struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "ecs" // Service endpoint prefix API calls made to.
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)
// New creates a new instance of the ECS client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create an ECS client from just a session.
// svc := ecs.New(mySession)
//
// // Create an ECS client with additional configuration
// svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECS {
svc := &ECS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2014-11-13",
JSONVersion: "1.1",
TargetPrefix: "AmazonEC2ContainerServiceV20141113",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for an ECS operation and runs any
// custom request initialization.
func (c *ECS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}

@ -1,224 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ecs
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// WaitUntilServicesInactive uses the Amazon ECS API operation
// DescribeServices to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *ECS) WaitUntilServicesInactive(input *DescribeServicesInput) error {
return c.WaitUntilServicesInactiveWithContext(aws.BackgroundContext(), input)
}
// WaitUntilServicesInactiveWithContext is an extended version of WaitUntilServicesInactive,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ECS) WaitUntilServicesInactiveWithContext(ctx aws.Context, input *DescribeServicesInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilServicesInactive",
MaxAttempts: 40,
Delay: request.ConstantWaiterDelay(15 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "failures[].reason",
Expected: "MISSING",
},
{
State: request.SuccessWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "services[].status",
Expected: "INACTIVE",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeServicesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeServicesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilServicesStable uses the Amazon ECS API operation
// DescribeServices to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *ECS) WaitUntilServicesStable(input *DescribeServicesInput) error {
return c.WaitUntilServicesStableWithContext(aws.BackgroundContext(), input)
}
// WaitUntilServicesStableWithContext is an extended version of WaitUntilServicesStable,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ECS) WaitUntilServicesStableWithContext(ctx aws.Context, input *DescribeServicesInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilServicesStable",
MaxAttempts: 40,
Delay: request.ConstantWaiterDelay(15 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "failures[].reason",
Expected: "MISSING",
},
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "services[].status",
Expected: "DRAINING",
},
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "services[].status",
Expected: "INACTIVE",
},
{
State: request.SuccessWaiterState,
Matcher: request.PathWaiterMatch, Argument: "length(services[?!(length(deployments) == `1` && runningCount == desiredCount)]) == `0`",
Expected: true,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeServicesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeServicesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilTasksRunning uses the Amazon ECS API operation
// DescribeTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *ECS) WaitUntilTasksRunning(input *DescribeTasksInput) error {
return c.WaitUntilTasksRunningWithContext(aws.BackgroundContext(), input)
}
// WaitUntilTasksRunningWithContext is an extended version of WaitUntilTasksRunning,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ECS) WaitUntilTasksRunningWithContext(ctx aws.Context, input *DescribeTasksInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilTasksRunning",
MaxAttempts: 100,
Delay: request.ConstantWaiterDelay(6 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "tasks[].lastStatus",
Expected: "STOPPED",
},
{
State: request.FailureWaiterState,
Matcher: request.PathAnyWaiterMatch, Argument: "failures[].reason",
Expected: "MISSING",
},
{
State: request.SuccessWaiterState,
Matcher: request.PathAllWaiterMatch, Argument: "tasks[].lastStatus",
Expected: "RUNNING",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeTasksInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeTasksRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilTasksStopped uses the Amazon ECS API operation
// DescribeTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *ECS) WaitUntilTasksStopped(input *DescribeTasksInput) error {
return c.WaitUntilTasksStoppedWithContext(aws.BackgroundContext(), input)
}
// WaitUntilTasksStoppedWithContext is an extended version of WaitUntilTasksStopped,
// with support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ECS) WaitUntilTasksStoppedWithContext(ctx aws.Context, input *DescribeTasksInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilTasksStopped",
MaxAttempts: 100,
Delay: request.ConstantWaiterDelay(6 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.PathAllWaiterMatch, Argument: "tasks[].lastStatus",
Expected: "STOPPED",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeTasksInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeTasksRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
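
A minimal sketch (not part of this commit) of using one of these waiters with a custom poll interval; the cluster and service names are assumptions:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ecs.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Poll DescribeServices every 10s instead of the default 15s.
	err := svc.WaitUntilServicesStableWithContext(ctx,
		&ecs.DescribeServicesInput{
			Cluster:  aws.String("example-cluster"),
			Services: []*string{aws.String("example-service")},
		},
		request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)))
	if err != nil {
		log.Fatal(err)
	}
}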

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Ben Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -1,10 +0,0 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,10 +0,0 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,28 +0,0 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

@ -1,12 +0,0 @@
// +build arm64
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,10 +0,0 @@
package bolt
import (
"syscall"
)
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return syscall.Fdatasync(int(db.file.Fd()))
}

@ -1,12 +0,0 @@
// +build mips64 mips64le
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,12 +0,0 @@
// +build mips mipsle
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x40000000 // 1GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,27 +0,0 @@
package bolt
import (
"syscall"
"unsafe"
)
const (
msAsync = 1 << iota // perform asynchronous writes
msSync // perform synchronous writes
msInvalidate // invalidate cached data
)
func msync(db *DB) error {
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
if errno != 0 {
return errno
}
return nil
}
func fdatasync(db *DB) error {
if db.data != nil {
return msync(db)
}
return db.file.Sync()
}

@ -1,9 +0,0 @@
// +build ppc
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

@ -1,12 +0,0 @@
// +build ppc64
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,12 +0,0 @@
// +build ppc64le
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,12 +0,0 @@
// +build s390x
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

@ -1,92 +0,0 @@
// +build !windows,!plan9,!solaris
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
if timeout != 0 {
t = time.Now()
}
fd := db.file.Fd()
flag := syscall.LOCK_NB
if exclusive {
flag |= syscall.LOCK_EX
} else {
flag |= syscall.LOCK_SH
}
for {
// Attempt to obtain the file lock.
err := syscall.Flock(int(fd), flag)
if err == nil {
return nil
} else if err != syscall.EWOULDBLOCK {
return err
}
// If we timed out then return an error.
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
return ErrTimeout
}
// Wait for a bit and try again.
time.Sleep(flockRetryTimeout)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := syscall.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}
// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
if e1 != 0 {
err = e1
}
return
}
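
The flock loop above is what Bolt's Options.Timeout feeds into. A minimal sketch (not part of this commit), assuming the github.com/boltdb/bolt import path and a hypothetical example.db file:

package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// With a non-zero Timeout the lock loop retries until the deadline and
	// then returns ErrTimeout, instead of blocking forever while another
	// process holds the file lock.
	db, err := bolt.Open("example.db", 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}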

@ -1,89 +0,0 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
"golang.org/x/sys/unix"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
if timeout != 0 {
t = time.Now()
}
fd := db.file.Fd()
var lockType int16
if exclusive {
lockType = syscall.F_WRLCK
} else {
lockType = syscall.F_RDLCK
}
for {
// Attempt to obtain the file lock.
lock := syscall.Flock_t{Type: lockType}
err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
return err
}
// If we timed out then return an error.
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
return ErrTimeout
}
// Wait for a bit and try again.
time.Sleep(flockRetryTimeout)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := unix.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}

@ -1,145 +0,0 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)
const (
lockExt = ".lock"
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r == 0 {
return err
}
return nil
}
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
if r == 0 {
return err
}
return nil
}
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
// Create a separate lock file on windows because a process
// cannot share an exclusive lock on the same file. This is
// needed during Tx.WriteTo().
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
if err != nil {
return err
}
db.lockfile = f
var t time.Time
if timeout != 0 {
t = time.Now()
}
fd := f.Fd()
var flag uint32 = flagLockFailImmediately
if exclusive {
flag |= flagLockExclusive
}
for {
// Attempt to obtain the file lock.
err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
return err
}
// If we timed out then return an error.
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
return ErrTimeout
}
// Wait for a bit and try again.
time.Sleep(flockRetryTimeout)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path + lockExt)
return err
}
// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
if !db.readOnly {
// Truncate the database to the size of the mmap.
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("truncate: %s", err)
}
}
// Open a file mapping handle.
sizehi := uint32(sz >> 32)
sizelo := uint32(sz) & 0xffffffff
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
if h == 0 {
return os.NewSyscallError("CreateFileMapping", errno)
}
// Create the memory map.
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
if addr == 0 {
return os.NewSyscallError("MapViewOfFile", errno)
}
// Close mapping handle.
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
return os.NewSyscallError("CloseHandle", err)
}
// Convert to a byte array.
db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
db.datasz = sz
return nil
}
// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
if db.data == nil {
return nil
}
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
if err := syscall.UnmapViewOfFile(addr); err != nil {
return os.NewSyscallError("UnmapViewOfFile", err)
}
return nil
}

@ -1,8 +0,0 @@
// +build !windows,!plan9,!linux,!openbsd
package bolt
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}

@ -1,775 +0,0 @@
package bolt
import (
"bytes"
"fmt"
"unsafe"
)
const (
// MaxKeySize is the maximum length of a key, in bytes.
MaxKeySize = 32768
// MaxValueSize is the maximum length of a value, in bytes.
MaxValueSize = (1 << 31) - 2
)
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
const (
minFillPercent = 0.1
maxFillPercent = 1.0
)
// DefaultFillPercent is the percentage that split pages are filled.
// This value can be changed by setting Bucket.FillPercent.
const DefaultFillPercent = 0.5
// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
*bucket
tx *Tx // the associated transaction
buckets map[string]*Bucket // subbucket cache
page *page // inline page reference
rootNode *node // materialized node for the root page.
nodes map[pgid]*node // node cache
// Sets the threshold for filling nodes when they split. By default,
// the bucket will fill to 50% but it can be useful to increase this
// amount if you know that your write workloads are mostly append-only.
//
// This is non-persisted across transactions so it must be set in every Tx.
FillPercent float64
}
// bucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type bucket struct {
root pgid // page id of the bucket's root-level page
sequence uint64 // monotonically incrementing, used by NextSequence()
}
// newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
if tx.writable {
b.buckets = make(map[string]*Bucket)
b.nodes = make(map[pgid]*node)
}
return b
}
// Tx returns the tx of the bucket.
func (b *Bucket) Tx() *Tx {
return b.tx
}
// Root returns the root of the bucket.
func (b *Bucket) Root() pgid {
return b.root
}
// Writable returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
return b.tx.writable
}
// Cursor creates a cursor associated with the bucket.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
// Update transaction statistics.
b.tx.stats.CursorCount++
// Allocate and return a cursor.
return &Cursor{
bucket: b,
stack: make([]elemRef, 0),
}
}
// Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
if b.buckets != nil {
if child := b.buckets[string(name)]; child != nil {
return child
}
}
// Move cursor to key.
c := b.Cursor()
k, v, flags := c.seek(name)
// Return nil if the key doesn't exist or it is not a bucket.
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
return nil
}
// Otherwise create a bucket and cache it.
var child = b.openBucket(v)
if b.buckets != nil {
b.buckets[string(name)] = child
}
return child
}
// openBucket is a helper method that re-interprets a sub-bucket value
// from a parent into a Bucket.
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
}
// Save a reference to the inline page if the bucket is inline.
if child.root == 0 {
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
}
return &child
}
// CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if b.tx.db == nil {
return nil, ErrTxClosed
} else if !b.tx.writable {
return nil, ErrTxNotWritable
} else if len(key) == 0 {
return nil, ErrBucketNameRequired
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key.
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
var bucket = Bucket{
bucket: &bucket{},
rootNode: &node{isLeaf: true},
FillPercent: DefaultFillPercent,
}
var value = bucket.write()
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, bucketLeafFlag)
// Since subbuckets are not allowed on inline buckets, we need to
// dereference the inline page, if it exists. This will cause the bucket
// to be treated as a regular, non-inline bucket for the rest of the tx.
b.page = nil
return b.Bucket(key), nil
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
child, err := b.CreateBucket(key)
if err == ErrBucketExists {
return b.Bucket(key), nil
} else if err != nil {
return nil, err
}
return child, nil
}
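// Usage sketch (not part of this file), from a client package:
// CreateBucketIfNotExists and Put are only valid inside a writable
// transaction, typically provided by db.Update. The path and names
// below are hypothetical.
//
//	db, err := bolt.Open("example.db", 0600, nil)
//	if err != nil {
//		return err
//	}
//	defer db.Close()
//	err = db.Update(func(tx *bolt.Tx) error {
//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
//		if err != nil {
//			return err
//		}
//		return b.Put([]byte("foo"), []byte("bar"))
//	})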
// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if bucket doesn't exist or is not a bucket.
if !bytes.Equal(key, k) {
return ErrBucketNotFound
} else if (flags & bucketLeafFlag) == 0 {
return ErrIncompatibleValue
}
// Recursively delete all child buckets.
child := b.Bucket(key)
err := child.ForEach(func(k, v []byte) error {
if v == nil {
if err := child.DeleteBucket(k); err != nil {
return fmt.Errorf("delete bucket: %s", err)
}
}
return nil
})
if err != nil {
return err
}
// Remove cached copy.
delete(b.buckets, string(key))
// Release all bucket pages to freelist.
child.nodes = nil
child.rootNode = nil
child.free()
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
k, v, flags := b.Cursor().seek(key)
// Return nil if this is a bucket.
if (flags & bucketLeafFlag) != 0 {
return nil
}
// If our target node isn't the same key as what's passed in then return nil.
if !bytes.Equal(key, k) {
return nil
}
return v
}
// Put sets the value for a key in the bucket.
// If the key exists then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
} else if len(key) == 0 {
return ErrKeyRequired
} else if len(key) > MaxKeySize {
return ErrKeyTooLarge
} else if int64(len(value)) > MaxValueSize {
return ErrValueTooLarge
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key with a bucket value.
if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, 0)
return nil
}
// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return nil if the key doesn't exist.
if !bytes.Equal(key, k) {
return nil
}
// Return an error if there is an existing bucket value.
if (flags & bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Set the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {
return 0, ErrTxClosed
} else if !b.Writable() {
return 0, ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence++
return b.bucket.sequence, nil
}
// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
}
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if err := fn(k, v); err != nil {
return err
}
}
return nil
}
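// Usage sketch (not part of this file), from a client package: iterate a
// bucket read-only via db.View; returning a non-nil error from fn stops
// the iteration. The bucket name is hypothetical.
//
//	err := db.View(func(tx *bolt.Tx) error {
//		b := tx.Bucket([]byte("widgets"))
//		if b == nil {
//			return nil // bucket not yet created
//		}
//		return b.ForEach(func(k, v []byte) error {
//			fmt.Printf("%s=%s\n", k, v)
//			return nil
//		})
//	})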
// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
var s, subStats BucketStats
pageSize := b.tx.db.pageSize
s.BucketN += 1
if b.root == 0 {
s.InlineBucketN += 1
}
b.forEachPage(func(p *page, depth int) {
if (p.flags & leafPageFlag) != 0 {
s.KeyN += int(p.count)
// used totals the used bytes for the page
used := pageHeaderSize
if p.count != 0 {
// If page has any elements, add all element headers.
used += leafPageElementSize * int(p.count-1)
// Add all element key, value sizes.
// The computation takes advantage of the fact that the position
// of the last element's key/value equals the total of the sizes
// of all previous elements' keys and values.
// It also includes the last element's header.
lastElement := p.leafPageElement(p.count - 1)
used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
}
if b.root == 0 {
// For inlined bucket just update the inline stats
s.InlineBucketInuse += used
} else {
// For non-inlined bucket update all the leaf stats
s.LeafPageN++
s.LeafInuse += used
s.LeafOverflowN += int(p.overflow)
// Collect stats from sub-buckets.
// Do that by iterating over all element headers
// looking for the ones with the bucketLeafFlag.
for i := uint16(0); i < p.count; i++ {
e := p.leafPageElement(i)
if (e.flags & bucketLeafFlag) != 0 {
// For any bucket element, open the element value
// and recursively call Stats on the contained bucket.
subStats.Add(b.openBucket(e.value()).Stats())
}
}
}
} else if (p.flags & branchPageFlag) != 0 {
s.BranchPageN++
lastElement := p.branchPageElement(p.count - 1)
// used totals the used bytes for the page
// Add header and all element headers.
used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
// Add size of all keys and values.
// Again, use the fact that last element's position equals to
// the total of key, value sizes of all previous elements.
used += int(lastElement.pos + lastElement.ksize)
s.BranchInuse += used
s.BranchOverflowN += int(p.overflow)
}
// Keep track of maximum page depth.
if depth+1 > s.Depth {
s.Depth = (depth + 1)
}
})
// Alloc stats can be computed from page counts and pageSize.
s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
// Add the max depth of sub-buckets to get total nested depth.
s.Depth += subStats.Depth
// Add the stats for all sub-buckets
s.Add(subStats)
return s
}
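// Illustrative usage sketch: BucketStats makes it easy to gauge page
// utilization, e.g. comparing bytes in use against bytes allocated.
/*
	err := db.View(func(tx *bolt.Tx) error {
		s := tx.Bucket([]byte("widgets")).Stats()
		fmt.Printf("keys=%d depth=%d leaf=%d/%d bytes branch=%d/%d bytes\n",
			s.KeyN, s.Depth, s.LeafInuse, s.LeafAlloc, s.BranchInuse, s.BranchAlloc)
		return nil
	})
*/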
// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
// If we have an inline page then just use that.
if b.page != nil {
fn(b.page, 0)
return
}
// Otherwise traverse the page hierarchy.
b.tx.forEachPage(b.root, 0, fn)
}
// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
// If we have an inline page or root node then just use that.
if b.page != nil {
fn(b.page, nil, 0)
return
}
b._forEachPageNode(b.root, 0, fn)
}
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
var p, n = b.pageNode(pgid)
// Execute function.
fn(p, n, depth)
// Recursively loop over children.
if p != nil {
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
b._forEachPageNode(elem.pgid, depth+1, fn)
}
}
} else {
if !n.isLeaf {
for _, inode := range n.inodes {
b._forEachPageNode(inode.pgid, depth+1, fn)
}
}
}
}
// spill writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
// Spill all child buckets first.
for name, child := range b.buckets {
// If the child bucket is small enough and it has no child buckets then
// write it inline into the parent bucket's page. Otherwise spill it
// like a normal bucket and make the parent value a pointer to the page.
var value []byte
if child.inlineable() {
child.free()
value = child.write()
} else {
if err := child.spill(); err != nil {
return err
}
// Update the child bucket header in this bucket.
value = make([]byte, unsafe.Sizeof(bucket{}))
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *child.bucket
}
// Skip writing the bucket if there are no materialized nodes.
if child.rootNode == nil {
continue
}
// Update parent node.
var c = b.Cursor()
k, _, flags := c.seek([]byte(name))
if !bytes.Equal([]byte(name), k) {
panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
}
if flags&bucketLeafFlag == 0 {
panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
}
c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
}
// Ignore if there's not a materialized root node.
if b.rootNode == nil {
return nil
}
// Spill nodes.
if err := b.rootNode.spill(); err != nil {
return err
}
b.rootNode = b.rootNode.root()
// Update the root node for this bucket.
if b.rootNode.pgid >= b.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
}
b.root = b.rootNode.pgid
return nil
}
// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
var n = b.rootNode
// Bucket must only contain a single leaf node.
if n == nil || !n.isLeaf {
return false
}
// Bucket is not inlineable if it contains subbuckets or if it goes beyond
// our threshold for inline bucket size.
var size = pageHeaderSize
for _, inode := range n.inodes {
size += leafPageElementSize + len(inode.key) + len(inode.value)
if inode.flags&bucketLeafFlag != 0 {
return false
} else if size > b.maxInlineBucketSize() {
return false
}
}
return true
}
// Returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
return b.tx.db.pageSize / 4
}
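// Worked example (illustrative): with the common 4096-byte page size the
// threshold is 4096/4 = 1024 bytes, so a child bucket whose single leaf
// (header plus all keys and values) stays under 1KB, and which contains no
// sub-buckets, is written inline into its parent's page.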
// write allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
// Allocate the appropriate size.
var n = b.rootNode
var value = make([]byte, bucketHeaderSize+n.size())
// Write a bucket header.
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *b.bucket
// Convert byte slice to a fake page and write the root node.
var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
n.write(p)
return value
}
// rebalance attempts to balance all nodes.
func (b *Bucket) rebalance() {
for _, n := range b.nodes {
n.rebalance()
}
for _, child := range b.buckets {
child.rebalance()
}
}
// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
_assert(b.nodes != nil, "nodes map expected")
// Retrieve node if it's already been created.
if n := b.nodes[pgid]; n != nil {
return n
}
// Otherwise create a node and cache it.
n := &node{bucket: b, parent: parent}
if parent == nil {
b.rootNode = n
} else {
parent.children = append(parent.children, n)
}
// Use the inline page if this is an inline bucket.
var p = b.page
if p == nil {
p = b.tx.page(pgid)
}
// Read the page into the node and cache it.
n.read(p)
b.nodes[pgid] = n
// Update statistics.
b.tx.stats.NodeCount++
return n
}
// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
if b.root == 0 {
return
}
var tx = b.tx
b.forEachPageNode(func(p *page, n *node, _ int) {
if p != nil {
tx.db.freelist.free(tx.meta.txid, p)
} else {
n.free()
}
})
b.root = 0
}
// dereference removes all references to the old mmap.
func (b *Bucket) dereference() {
if b.rootNode != nil {
b.rootNode.root().dereference()
}
for _, child := range b.buckets {
child.dereference()
}
}
// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
// Inline buckets have a fake page embedded in their value so treat them
// differently. We'll return the rootNode (if available) or the fake page.
if b.root == 0 {
if id != 0 {
panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
}
if b.rootNode != nil {
return nil, b.rootNode
}
return b.page, nil
}
// Check the node cache for non-inline buckets.
if b.nodes != nil {
if n := b.nodes[id]; n != nil {
return nil, n
}
}
// Finally lookup the page from the transaction if no node is materialized.
return b.tx.page(id), nil
}
// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
// Page count statistics.
BranchPageN int // number of logical branch pages
BranchOverflowN int // number of physical branch overflow pages
LeafPageN int // number of logical leaf pages
LeafOverflowN int // number of physical leaf overflow pages
// Tree statistics.
KeyN int // number of key/value pairs
Depth int // number of levels in B+tree
// Page size utilization.
BranchAlloc int // bytes allocated for physical branch pages
BranchInuse int // bytes actually used for branch data
LeafAlloc int // bytes allocated for physical leaf pages
LeafInuse int // bytes actually used for leaf data
// Bucket statistics
BucketN int // total number of buckets including the top bucket
InlineBucketN int // total number of inlined buckets
InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}
func (s *BucketStats) Add(other BucketStats) {
s.BranchPageN += other.BranchPageN
s.BranchOverflowN += other.BranchOverflowN
s.LeafPageN += other.LeafPageN
s.LeafOverflowN += other.LeafOverflowN
s.KeyN += other.KeyN
if s.Depth < other.Depth {
s.Depth = other.Depth
}
s.BranchAlloc += other.BranchAlloc
s.BranchInuse += other.BranchInuse
s.LeafAlloc += other.LeafAlloc
s.LeafInuse += other.LeafInuse
s.BucketN += other.BucketN
s.InlineBucketN += other.InlineBucketN
s.InlineBucketInuse += other.InlineBucketInuse
}
// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
copy(clone, v)
return clone
}


@ -1,400 +0,0 @@
package bolt
import (
"bytes"
"fmt"
"sort"
)
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
bucket *Bucket
stack []elemRef
}
// Bucket returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
return c.bucket
}
// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.first()
// If we land on an empty page then move to the next value.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
c.next()
}
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root)
ref := elemRef{page: p, node: n}
ref.index = ref.count() - 1
c.stack = append(c.stack, ref)
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.next()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
for i := len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index > 0 {
elem.index--
break
}
c.stack = c.stack[:i]
}
// If we've hit the end then return nil.
if len(c.stack) == 0 {
return nil, nil
}
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
k, v, flags := c.seek(seek)
// If we ended up after the last element of a page then move to the next one.
if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
k, v, flags = c.next()
}
if k == nil {
return nil, nil
} else if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
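// Illustrative usage sketch (assumed names): because Seek lands on the first
// key >= the target, prefix scans fall out naturally.
/*
	err := db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		prefix := []byte("user:")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
*/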
// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
func (c *Cursor) Delete() error {
if c.bucket.tx.db == nil {
return ErrTxClosed
} else if !c.bucket.Writable() {
return ErrTxNotWritable
}
key, _, flags := c.keyValue()
// Return an error if current value is a bucket.
if (flags & bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
c.node().del(key)
return nil
}
// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
c.search(seek, c.bucket.root)
ref := &c.stack[len(c.stack)-1]
// If the cursor is pointing to the end of page/node then return nil.
if ref.index >= ref.count() {
return nil, nil, 0
}
// If this is a bucket then return a nil value.
return c.keyValue()
}
// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() {
for {
// Exit when we hit a leaf page.
var ref = &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the first element to the stack.
var pgid pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
}
}
// last moves the cursor to the last leaf element under the last page in the stack.
func (c *Cursor) last() {
for {
// Exit when we hit a leaf page.
ref := &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the last element in the stack.
var pgid pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
var nextRef = elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1
c.stack = append(c.stack, nextRef)
}
}
// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
for {
// Attempt to move over one element until we're successful.
// Move up the stack as we hit the end of each page in our stack.
var i int
for i = len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index < elem.count()-1 {
elem.index++
break
}
}
// If we've hit the root page then stop and return. This will leave the
// cursor on the last element of the last page.
if i == -1 {
return nil, nil, 0
}
// Otherwise start from where we left off in the stack and find the
// first element of the first leaf page.
c.stack = c.stack[:i+1]
c.first()
// If this is an empty page then restart and move back up the stack.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
continue
}
return c.keyValue()
}
}
// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
p, n := c.bucket.pageNode(pgid)
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
}
e := elemRef{page: p, node: n}
c.stack = append(c.stack, e)
// If we're on a leaf page/node then find the specific node.
if e.isLeaf() {
c.nsearch(key)
return
}
if n != nil {
c.searchNode(key, n)
return
}
c.searchPage(key, p)
}
func (c *Cursor) searchNode(key []byte, n *node) {
var exact bool
index := sort.Search(len(n.inodes), func(i int) bool {
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
ret := bytes.Compare(n.inodes[i].key, key)
if ret == 0 {
exact = true
}
return ret != -1
})
if !exact && index > 0 {
index--
}
c.stack[len(c.stack)-1].index = index
// Recursively search to the next page.
c.search(key, n.inodes[index].pgid)
}
func (c *Cursor) searchPage(key []byte, p *page) {
// Binary search for the correct range.
inodes := p.branchPageElements()
var exact bool
index := sort.Search(int(p.count), func(i int) bool {
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
ret := bytes.Compare(inodes[i].key(), key)
if ret == 0 {
exact = true
}
return ret != -1
})
if !exact && index > 0 {
index--
}
c.stack[len(c.stack)-1].index = index
// Recursively search to the next page.
c.search(key, inodes[index].pgid)
}
// nsearch searches the leaf node on the top of the stack for a key.
func (c *Cursor) nsearch(key []byte) {
e := &c.stack[len(c.stack)-1]
p, n := e.page, e.node
// If we have a node then search its inodes.
if n != nil {
index := sort.Search(len(n.inodes), func(i int) bool {
return bytes.Compare(n.inodes[i].key, key) != -1
})
e.index = index
return
}
// If we have a page then search its leaf elements.
inodes := p.leafPageElements()
index := sort.Search(int(p.count), func(i int) bool {
return bytes.Compare(inodes[i].key(), key) != -1
})
e.index = index
}
// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
ref := &c.stack[len(c.stack)-1]
if ref.count() == 0 || ref.index >= ref.count() {
return nil, nil, 0
}
// Retrieve value from node.
if ref.node != nil {
inode := &ref.node.inodes[ref.index]
return inode.key, inode.value, inode.flags
}
// Or retrieve value from page.
elem := ref.page.leafPageElement(uint16(ref.index))
return elem.key(), elem.value(), elem.flags
}
// node returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
// If the top of the stack is a leaf node then just return it.
if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
return ref.node
}
// Start from root and traverse down the hierarchy.
var n = c.stack[0].node
if n == nil {
n = c.bucket.node(c.stack[0].page.id, nil)
}
for _, ref := range c.stack[:len(c.stack)-1] {
_assert(!n.isLeaf, "expected branch node")
n = n.childAt(int(ref.index))
}
_assert(n.isLeaf, "expected leaf node")
return n
}
// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
page *page
node *node
index int
}
// isLeaf returns whether the ref is pointing at a leaf page/node.
func (r *elemRef) isLeaf() bool {
if r.node != nil {
return r.node.isLeaf
}
return (r.page.flags & leafPageFlag) != 0
}
// count returns the number of inodes or page elements.
func (r *elemRef) count() int {
if r.node != nil {
return len(r.node.inodes)
}
return int(r.page.count)
}

vendor/github.com/coreos/bbolt/db.go generated vendored

File diff suppressed because it is too large.


@ -1,44 +0,0 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.
Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.
The design of Bolt is based on Howard Chu's LMDB database project.
Bolt currently works on Windows, Mac OS X, and Linux.
Basics
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.
Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.
Caveats
The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.
Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bolt
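// Minimal usage sketch (illustrative; file name and bucket name are assumed):
// open a database, write a key in an update transaction, then read it back
// in a view transaction.
/*
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	})

	err = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("foo = %s\n", tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		return nil
	})
*/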


@ -1,71 +0,0 @@
package bolt
import "errors"
// These errors can be returned when opening or calling methods on a DB.
var (
// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
// is opened or after it is closed.
ErrDatabaseNotOpen = errors.New("database not open")
// ErrDatabaseOpen is returned when opening a database that is
// already open.
ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a
// different version of Bolt.
ErrVersionMismatch = errors.New("version mismatch")
// ErrChecksum is returned when either meta page checksum does not match.
ErrChecksum = errors.New("checksum error")
// ErrTimeout is returned when a database cannot obtain an exclusive lock
// on the data file after the timeout passed to Open().
ErrTimeout = errors.New("timeout")
)
// These errors can occur when beginning or committing a Tx.
var (
// ErrTxNotWritable is returned when performing a write operation on a
// read-only transaction.
ErrTxNotWritable = errors.New("tx not writable")
// ErrTxClosed is returned when committing or rolling back a transaction
// that has already been committed or rolled back.
ErrTxClosed = errors.New("tx closed")
// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
// read-only database.
ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)
// These errors can occur when putting or deleting a value or a bucket.
var (
// ErrBucketNotFound is returned when trying to access a bucket that has
// not been created yet.
ErrBucketNotFound = errors.New("bucket not found")
// ErrBucketExists is returned when creating a bucket that already exists.
ErrBucketExists = errors.New("bucket already exists")
// ErrBucketNameRequired is returned when creating a bucket with a blank name.
ErrBucketNameRequired = errors.New("bucket name required")
// ErrKeyRequired is returned when inserting a zero-length key.
ErrKeyRequired = errors.New("key required")
// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
ErrKeyTooLarge = errors.New("key too large")
// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
ErrValueTooLarge = errors.New("value too large")
// ErrIncompatibleValue is returned when trying to create or delete a bucket
// on an existing non-bucket key or when trying to create or delete a
// non-bucket key on an existing bucket key.
ErrIncompatibleValue = errors.New("incompatible value")
)
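// Illustrative usage sketch: these are sentinel errors, so callers compare
// them directly, e.g. treating "already exists" as non-fatal (names assumed).
/*
	err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err == bolt.ErrBucketExists {
			return nil // another writer created it first; fine
		}
		return err
	})
*/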


@ -1,333 +0,0 @@
package bolt
import (
"fmt"
"sort"
"unsafe"
)
// txPending holds a list of pgids and corresponding allocation txns
// that are pending to be freed.
type txPending struct {
ids []pgid
alloctx []txid // txids allocating the ids
lastReleaseBegin txid // beginning txid of last matching releaseRange
}
// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
ids []pgid // all free and available free page ids.
allocs map[pgid]txid // mapping of txid that allocated a pgid.
pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
cache map[pgid]bool // fast lookup of all free and pending page ids.
}
// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
return &freelist{
allocs: make(map[pgid]txid),
pending: make(map[txid]*txPending),
cache: make(map[pgid]bool),
}
}
// size returns the size of the page after serialization.
func (f *freelist) size() int {
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
// count returns count of pages on the freelist
func (f *freelist) count() int {
return f.free_count() + f.pending_count()
}
// free_count returns count of free pages
func (f *freelist) free_count() int {
return len(f.ids)
}
// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
var count int
for _, txp := range f.pending {
count += len(txp.ids)
}
return count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, txp := range f.pending {
m = append(m, txp.ids...)
}
sort.Sort(m)
mergepgids(dst, f.ids, m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(txid txid, n int) pgid {
if len(f.ids) == 0 {
return 0
}
var initial, previd pgid
for i, id := range f.ids {
if id <= 1 {
panic(fmt.Sprintf("invalid page allocation: %d", id))
}
// Reset initial page if this is not contiguous.
if previd == 0 || id-previd != 1 {
initial = id
}
// If we found a contiguous block then remove it and return it.
if (id-initial)+1 == pgid(n) {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if (i + 1) == n {
f.ids = f.ids[i+1:]
} else {
copy(f.ids[i-n+1:], f.ids[i+1:])
f.ids = f.ids[:len(f.ids)-n]
}
// Remove from the free cache.
for i := pgid(0); i < pgid(n); i++ {
delete(f.cache, initial+i)
}
f.allocs[initial] = txid
return initial
}
previd = id
}
return 0
}
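// Worked example (illustrative): with f.ids = [3 4 5 9], allocate(tx, 3)
// finds the contiguous run 3,4,5, removes those ids from the free list and
// cache, records the allocation under the txid, and returns 3; allocate(tx, 4)
// finds no contiguous run of four pages and returns 0.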
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
if p.id <= 1 {
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
}
// Free page and all its overflow pages.
txp := f.pending[txid]
if txp == nil {
txp = &txPending{}
f.pending[txid] = txp
}
allocTxid, ok := f.allocs[p.id]
if ok {
delete(f.allocs, p.id)
} else if (p.flags & freelistPageFlag) != 0 {
// Freelist is always allocated by prior tx.
allocTxid = txid - 1
}
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
// Verify that page is not already free.
if f.cache[id] {
panic(fmt.Sprintf("page %d already freed", id))
}
// Add to the freelist and cache.
txp.ids = append(txp.ids, id)
txp.alloctx = append(txp.alloctx, allocTxid)
f.cache[id] = true
}
}
// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
m := make(pgids, 0)
for tid, txp := range f.pending {
if tid <= txid {
// Move transaction's pending pages to the available freelist.
// Don't remove from the cache since the page is still free.
m = append(m, txp.ids...)
delete(f.pending, tid)
}
}
sort.Sort(m)
f.ids = pgids(f.ids).merge(m)
}
// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
func (f *freelist) releaseRange(begin, end txid) {
if begin > end {
return
}
var m pgids
for tid, txp := range f.pending {
if tid < begin || tid > end {
continue
}
// Don't recompute freed pages if the ranges haven't been updated.
if txp.lastReleaseBegin == begin {
continue
}
for i := 0; i < len(txp.ids); i++ {
if atx := txp.alloctx[i]; atx < begin || atx > end {
continue
}
m = append(m, txp.ids[i])
txp.ids[i] = txp.ids[len(txp.ids)-1]
txp.ids = txp.ids[:len(txp.ids)-1]
txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
i--
}
txp.lastReleaseBegin = begin
if len(txp.ids) == 0 {
delete(f.pending, tid)
}
}
sort.Sort(m)
f.ids = pgids(f.ids).merge(m)
}
// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
// Remove page ids from cache.
txp := f.pending[txid]
if txp == nil {
return
}
var m pgids
for i, pgid := range txp.ids {
delete(f.cache, pgid)
tx := txp.alloctx[i]
if tx == 0 {
continue
}
if tx != txid {
// Pending free aborted; restore page back to alloc list.
f.allocs[pgid] = tx
} else {
// Freed page was allocated by this txn; OK to throw away.
m = append(m, pgid)
}
}
// Remove pages from pending list and mark as free if allocated by txid.
delete(f.pending, txid)
sort.Sort(m)
f.ids = pgids(f.ids).merge(m)
}
// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
return f.cache[pgid]
}
// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
if (p.flags & freelistPageFlag) == 0 {
panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
}
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
idx, count := 0, int(p.count)
if count == 0xFFFF {
idx = 1
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
}
// Copy the list of page ids from the freelist.
if count == 0 {
f.ids = nil
} else {
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
}
// Rebuild the page cache.
f.reindex()
}
// readIDs initializes the freelist from a given list of ids.
func (f *freelist) readIDs(ids []pgid) {
f.ids = ids
f.reindex()
}
// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
}
// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
f.read(p)
// Build a cache of only pending pages.
pcache := make(map[pgid]bool)
for _, txp := range f.pending {
for _, pendingID := range txp.ids {
pcache[pendingID] = true
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
var a []pgid
for _, id := range f.ids {
if !pcache[id] {
a = append(a, id)
}
}
f.ids = a
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
f.reindex()
}
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
f.cache[id] = true
}
for _, txp := range f.pending {
for _, pendingID := range txp.ids {
f.cache[pendingID] = true
}
}
}


@ -1,604 +0,0 @@
package bolt
import (
"bytes"
"fmt"
"sort"
"unsafe"
)
// node represents an in-memory, deserialized page.
type node struct {
bucket *Bucket
isLeaf bool
unbalanced bool
spilled bool
key []byte
pgid pgid
parent *node
children nodes
inodes inodes
}
// root returns the top-level node this node is attached to.
func (n *node) root() *node {
if n.parent == nil {
return n
}
return n.parent.root()
}
// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
if n.isLeaf {
return 1
}
return 2
}
// size returns the size of the node after serialization.
func (n *node) size() int {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + len(item.key) + len(item.value)
}
return sz
}
// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + len(item.key) + len(item.value)
if sz >= v {
return false
}
}
return true
}
// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() int {
if n.isLeaf {
return leafPageElementSize
}
return branchPageElementSize
}
// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
if n.isLeaf {
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
}
return n.bucket.node(n.inodes[index].pgid, n)
}
// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
return index
}
// numChildren returns the number of children.
func (n *node) numChildren() int {
return len(n.inodes)
}
// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index >= n.parent.numChildren()-1 {
return nil
}
return n.parent.childAt(index + 1)
}
// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index == 0 {
return nil
}
return n.parent.childAt(index - 1)
}
// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
if pgid >= n.bucket.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
} else if len(oldKey) <= 0 {
panic("put: zero-length old key")
} else if len(newKey) <= 0 {
panic("put: zero-length new key")
}
// Find insertion index.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
// Add capacity and shift nodes if we don't have an exact match and need to insert.
exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
if !exact {
n.inodes = append(n.inodes, inode{})
copy(n.inodes[index+1:], n.inodes[index:])
}
inode := &n.inodes[index]
inode.flags = flags
inode.key = newKey
inode.value = value
inode.pgid = pgid
_assert(len(inode.key) > 0, "put: zero-length inode key")
}
// del removes a key from the node.
func (n *node) del(key []byte) {
// Find index of key.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
// Exit if the key isn't found.
if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
return
}
// Delete inode from the node.
n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
// Mark the node as needing rebalancing.
n.unbalanced = true
}
// read initializes the node from a page.
func (n *node) read(p *page) {
n.pgid = p.id
n.isLeaf = ((p.flags & leafPageFlag) != 0)
n.inodes = make(inodes, int(p.count))
for i := 0; i < int(p.count); i++ {
inode := &n.inodes[i]
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
inode.flags = elem.flags
inode.key = elem.key()
inode.value = elem.value()
} else {
elem := p.branchPageElement(uint16(i))
inode.pgid = elem.pgid
inode.key = elem.key()
}
_assert(len(inode.key) > 0, "read: zero-length inode key")
}
// Save first key so we can find the node in the parent when we spill.
if len(n.inodes) > 0 {
n.key = n.inodes[0].key
_assert(len(n.key) > 0, "read: zero-length node key")
} else {
n.key = nil
}
}
// write writes the items onto one or more pages.
func (n *node) write(p *page) {
// Initialize page.
if n.isLeaf {
p.flags |= leafPageFlag
} else {
p.flags |= branchPageFlag
}
if len(n.inodes) >= 0xFFFF {
panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
}
p.count = uint16(len(n.inodes))
// Stop here if there are no items to write.
if p.count == 0 {
return
}
// Loop over each item and write it to the page.
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
for i, item := range n.inodes {
_assert(len(item.key) > 0, "write: zero-length inode key")
// Write the page element.
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.flags = item.flags
elem.ksize = uint32(len(item.key))
elem.vsize = uint32(len(item.value))
} else {
elem := p.branchPageElement(uint16(i))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.ksize = uint32(len(item.key))
elem.pgid = item.pgid
_assert(elem.pgid != p.id, "write: circular dependency occurred")
}
// If the length of key+value is larger than the max allocation size
// then we need to reallocate the byte array pointer.
//
// See: https://github.com/boltdb/bolt/pull/335
klen, vlen := len(item.key), len(item.value)
if len(b) < klen+vlen {
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
}
// Write data for the element to the end of the page.
copy(b[0:], item.key)
b = b[klen:]
copy(b[0:], item.value)
b = b[vlen:]
}
// DEBUG ONLY: n.dump()
}
// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
var nodes []*node
node := n
for {
// Split node into two.
a, b := node.splitTwo(pageSize)
nodes = append(nodes, a)
// If we can't split then exit the loop.
if b == nil {
break
}
// Set node to b so it gets split on the next iteration.
node = b
}
return nodes
}
// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
// Ignore the split if the page doesn't have at least enough nodes for
// two pages or if the nodes can fit in a single page.
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
return n, nil
}
// Determine the threshold before starting a new node.
var fillPercent = n.bucket.FillPercent
if fillPercent < minFillPercent {
fillPercent = minFillPercent
} else if fillPercent > maxFillPercent {
fillPercent = maxFillPercent
}
threshold := int(float64(pageSize) * fillPercent)
// Determine split position and sizes of the two pages.
splitIndex, _ := n.splitIndex(threshold)
// Split node into two separate nodes.
// If there's no parent then we'll need to create one.
if n.parent == nil {
n.parent = &node{bucket: n.bucket, children: []*node{n}}
}
// Create a new node and add it to the parent.
next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
n.parent.children = append(n.parent.children, next)
// Split inodes across two nodes.
next.inodes = n.inodes[splitIndex:]
n.inodes = n.inodes[:splitIndex]
// Update the statistics.
n.bucket.tx.stats.Split++
return n, next
}
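// Worked example (illustrative): with a 4096-byte page and a FillPercent of
// 0.5 (the package default), the threshold is int(4096 * 0.5) = 2048 bytes,
// so the first node keeps elements until adding another would push it past
// 2048 bytes (subject to the minimum keys per page).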
// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This should only be called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
sz = pageHeaderSize
// Loop until we only have the minimum number of keys required for the second page.
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
index = i
inode := n.inodes[i]
elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
// If we have at least the minimum number of keys and adding another
// node would put us over the threshold then exit and return.
if i >= minKeysPerPage && sz+elsize > threshold {
break
}
// Add the element size to the total size.
sz += elsize
}
return
}
// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
var tx = n.bucket.tx
if n.spilled {
return nil
}
// Spill child nodes first. Child nodes can materialize sibling nodes in
// the case of split-merge so we cannot use a range loop. We have to check
// the children size on every loop iteration.
sort.Sort(n.children)
for i := 0; i < len(n.children); i++ {
if err := n.children[i].spill(); err != nil {
return err
}
}
// We no longer need the child list because it's only used for spill tracking.
n.children = nil
// Split nodes into appropriate sizes. The first node will always be n.
var nodes = n.split(tx.db.pageSize)
for _, node := range nodes {
// Add node's page to the freelist if it's not new.
if node.pgid > 0 {
tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
node.pgid = 0
}
// Allocate contiguous space for the node.
p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
if err != nil {
return err
}
// Write the node.
if p.id >= tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
}
node.pgid = p.id
node.write(p)
node.spilled = true
// Insert into parent inodes.
if node.parent != nil {
var key = node.key
if key == nil {
key = node.inodes[0].key
}
node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
node.key = node.inodes[0].key
_assert(len(node.key) > 0, "spill: zero-length node key")
}
// Update the statistics.
tx.stats.Spill++
}
// If the root node split and created a new root then we need to spill that
// as well. We'll clear out the children to make sure it doesn't try to respill.
if n.parent != nil && n.parent.pgid == 0 {
n.children = nil
return n.parent.spill()
}
return nil
}
// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
if !n.unbalanced {
return
}
n.unbalanced = false
// Update statistics.
n.bucket.tx.stats.Rebalance++
// Ignore if node is above threshold (25%) and has enough keys.
var threshold = n.bucket.tx.db.pageSize / 4
if n.size() > threshold && len(n.inodes) > n.minKeys() {
return
}
// Root node has special handling.
if n.parent == nil {
// If root node is a branch and only has one node then collapse it.
if !n.isLeaf && len(n.inodes) == 1 {
// Move root's child up.
child := n.bucket.node(n.inodes[0].pgid, n)
n.isLeaf = child.isLeaf
n.inodes = child.inodes[:]
n.children = child.children
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent = n
}
}
// Remove old child.
child.parent = nil
delete(n.bucket.nodes, child.pgid)
child.free()
}
return
}
// If node has no keys then just remove it.
if n.numChildren() == 0 {
n.parent.del(n.key)
n.parent.removeChild(n)
delete(n.bucket.nodes, n.pgid)
n.free()
n.parent.rebalance()
return
}
_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
// Destination node is right sibling if idx == 0, otherwise left sibling.
var target *node
var useNextSibling = (n.parent.childIndex(n) == 0)
if useNextSibling {
target = n.nextSibling()
} else {
target = n.prevSibling()
}
// If both this node and the target node are too small then merge them.
if useNextSibling {
// Reparent all child nodes being moved.
for _, inode := range target.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent.removeChild(child)
child.parent = n
child.parent.children = append(child.parent.children, child)
}
}
// Copy over inodes from target and remove target.
n.inodes = append(n.inodes, target.inodes...)
n.parent.del(target.key)
n.parent.removeChild(target)
delete(n.bucket.nodes, target.pgid)
target.free()
} else {
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent.removeChild(child)
child.parent = target
child.parent.children = append(child.parent.children, child)
}
}
// Copy over inodes to target and remove node.
target.inodes = append(target.inodes, n.inodes...)
n.parent.del(n.key)
n.parent.removeChild(n)
delete(n.bucket.nodes, n.pgid)
n.free()
}
// Either this node or the target node was deleted from the parent so rebalance it.
n.parent.rebalance()
}
// removeChild removes a node from the list of in-memory children.
// This does not affect the inodes.
func (n *node) removeChild(target *node) {
for i, child := range n.children {
if child == target {
n.children = append(n.children[:i], n.children[i+1:]...)
return
}
}
}
// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
if n.key != nil {
key := make([]byte, len(n.key))
copy(key, n.key)
n.key = key
_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
}
for i := range n.inodes {
inode := &n.inodes[i]
key := make([]byte, len(inode.key))
copy(key, inode.key)
inode.key = key
_assert(len(inode.key) > 0, "dereference: zero-length inode key")
value := make([]byte, len(inode.value))
copy(value, inode.value)
inode.value = value
}
// Recursively dereference children.
for _, child := range n.children {
child.dereference()
}
// Update statistics.
n.bucket.tx.stats.NodeDeref++
}
// free adds the node's underlying page to the freelist.
func (n *node) free() {
if n.pgid != 0 {
n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
n.pgid = 0
}
}
// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
// Write node header.
var typ = "branch"
if n.isLeaf {
typ = "leaf"
}
warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
// Write out abbreviated version of each item.
for _, item := range n.inodes {
if n.isLeaf {
if item.flags&bucketLeafFlag != 0 {
bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
} else {
warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
}
} else {
warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
}
}
warn("")
}
*/
type nodes []*node
func (s nodes) Len() int { return len(s) }
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
flags uint32
pgid pgid
key []byte
value []byte
}
type inodes []inode


@ -1,197 +0,0 @@
package bolt
import (
"fmt"
"os"
"sort"
"unsafe"
)
const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
const minKeysPerPage = 2
const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
const (
branchPageFlag = 0x01
leafPageFlag = 0x02
metaPageFlag = 0x04
freelistPageFlag = 0x10
)
const (
bucketLeafFlag = 0x01
)
type pgid uint64
type page struct {
id pgid
flags uint16
count uint16
overflow uint32
ptr uintptr
}
// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
if (p.flags & branchPageFlag) != 0 {
return "branch"
} else if (p.flags & leafPageFlag) != 0 {
return "leaf"
} else if (p.flags & metaPageFlag) != 0 {
return "meta"
} else if (p.flags & freelistPageFlag) != 0 {
return "freelist"
}
return fmt.Sprintf("unknown<%02x>", p.flags)
}
// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
return (*meta)(unsafe.Pointer(&p.ptr))
}
// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
return n
}
// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}
// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
fmt.Fprintf(os.Stderr, "%x\n", buf)
}
type pages []*page
func (s pages) Len() int { return len(s) }
func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
// branchPageElement represents a node on a branch page.
type branchPageElement struct {
pos uint32
ksize uint32
pgid pgid
}
// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}
// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
flags uint32
pos uint32
ksize uint32
vsize uint32
}
// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}
// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}
// PageInfo represents human readable information about a page.
type PageInfo struct {
ID int
Type string
Count int
OverflowCount int
}
type pgids []pgid
func (s pgids) Len() int { return len(s) }
func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
return b
}
if len(b) == 0 {
return a
}
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b
if b[0] < a[0] {
lead, follow = b, a
}
// Continue while there are elements in the lead.
for len(lead) > 0 {
// Merge largest prefix of lead that is ahead of follow[0].
n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
merged = append(merged, lead[:n]...)
if n >= len(lead) {
break
}
// Swap lead and follow.
lead, follow = follow, lead[n:]
}
// Append what's left in follow.
_ = append(merged, follow...)
}
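// Worked example (illustrative): merging a = [4 5 6] and b = [1 2 7] first
// copies 1,2 from the lead slice, swaps so 4,5,6 are copied next, then
// appends the trailing 7, leaving dst = [1 2 4 5 6 7].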

vendor/github.com/coreos/bbolt/tx.go generated vendored

@ -1,705 +0,0 @@
package bolt
import (
"fmt"
"io"
"os"
"sort"
"strings"
"time"
"unsafe"
)
// txid represents the internal transaction identifier.
type txid uint64
// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages cannot be reclaimed by the writer until no more transactions
// are using them. A long-running read transaction can cause the database to
// quickly grow.
type Tx struct {
writable bool
managed bool
db *DB
meta *meta
root Bucket
pages map[pgid]*page
stats TxStats
commitHandlers []func()
// WriteFlag specifies the flag for write-related methods like WriteTo().
// Tx opens the database file with the specified flag to copy the data.
//
// By default, the flag is unset, which works well for mostly in-memory
// workloads. For databases that are much larger than available RAM,
// set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
WriteFlag int
}
// init initializes the transaction.
func (tx *Tx) init(db *DB) {
tx.db = db
tx.pages = nil
// Copy the meta page since it can be changed by the writer.
tx.meta = &meta{}
db.meta().copy(tx.meta)
// Copy over the root bucket.
tx.root = newBucket(tx)
tx.root.bucket = &bucket{}
*tx.root.bucket = tx.meta.root
// Increment the transaction id and add a page cache for writable transactions.
if tx.writable {
tx.pages = make(map[pgid]*page)
tx.meta.txid += txid(1)
}
}
// ID returns the transaction id.
func (tx *Tx) ID() int {
return int(tx.meta.txid)
}
// DB returns a reference to the database that created the transaction.
func (tx *Tx) DB() *DB {
return tx.db
}
// Size returns current database size in bytes as seen by this transaction.
func (tx *Tx) Size() int64 {
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
}
// Writable returns whether the transaction can perform write operations.
func (tx *Tx) Writable() bool {
return tx.writable
}
// Cursor creates a cursor associated with the root bucket.
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
return tx.root.Cursor()
}
// Stats retrieves a copy of the current transaction statistics.
func (tx *Tx) Stats() TxStats {
return tx.stats
}
// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
return tx.root.Bucket(name)
}
// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
return tx.root.CreateBucket(name)
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
return tx.root.CreateBucketIfNotExists(name)
}
// DeleteBucket deletes a bucket.
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
return tx.root.DeleteBucket(name)
}
// ForEach executes a function for each bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
return tx.root.ForEach(func(k, v []byte) error {
return fn(k, tx.root.Bucket(k))
})
}
// OnCommit adds a handler function to be executed after the transaction successfully commits.
func (tx *Tx) OnCommit(fn func()) {
tx.commitHandlers = append(tx.commitHandlers, fn)
}
// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
_assert(!tx.managed, "managed tx commit not allowed")
if tx.db == nil {
return ErrTxClosed
} else if !tx.writable {
return ErrTxNotWritable
}
// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
// Rebalance nodes which have had deletions.
var startTime = time.Now()
tx.root.rebalance()
if tx.stats.Rebalance > 0 {
tx.stats.RebalanceTime += time.Since(startTime)
}
// Spill data onto dirty pages.
startTime = time.Now()
if err := tx.root.spill(); err != nil {
tx.rollback()
return err
}
tx.stats.SpillTime += time.Since(startTime)
// Free the old root bucket.
tx.meta.root.root = tx.root.root
// Free the old freelist because commit writes out a fresh freelist.
if tx.meta.freelist != pgidNoFreelist {
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
}
if !tx.db.NoFreelistSync {
err := tx.commitFreelist()
if err != nil {
return err
}
} else {
tx.meta.freelist = pgidNoFreelist
}
// Write dirty pages to disk.
startTime = time.Now()
if err := tx.write(); err != nil {
tx.rollback()
return err
}
// If strict mode is enabled then perform a consistency check.
// Only the first consistency error is reported in the panic.
if tx.db.StrictMode {
ch := tx.Check()
var errs []string
for {
err, ok := <-ch
if !ok {
break
}
errs = append(errs, err.Error())
}
if len(errs) > 0 {
panic("check fail: " + strings.Join(errs, "\n"))
}
}
// Write meta to disk.
if err := tx.writeMeta(); err != nil {
tx.rollback()
return err
}
tx.stats.WriteTime += time.Since(startTime)
// Finalize the transaction.
tx.close()
// Execute commit handlers now that the locks have been removed.
for _, fn := range tx.commitHandlers {
fn()
}
return nil
}
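// Example (editor's sketch, not part of the original file): the manual
// Begin/Commit/Rollback lifecycle that DB.Update wraps, expanded for
// clarity; bucket, k, and v are illustrative.
func putManually(db *DB, bucket, k, v []byte) error {
	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		return err
	}
	// After a successful Commit this returns ErrTxClosed and is harmless.
	defer tx.Rollback()
	b, err := tx.CreateBucketIfNotExists(bucket)
	if err != nil {
		return err
	}
	if err := b.Put(k, v); err != nil {
		return err
	}
	return tx.Commit()
}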
func (tx *Tx) commitFreelist() error {
// Allocate new pages for the new freelist. This may overestimate the
// freelist's size but will never underestimate it (which would be bad).
opgid := tx.meta.pgid
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil {
tx.rollback()
return err
}
if err := tx.db.freelist.write(p); err != nil {
tx.rollback()
return err
}
tx.meta.freelist = p.id
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
return nil
}
// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
_assert(!tx.managed, "managed tx rollback not allowed")
if tx.db == nil {
return ErrTxClosed
}
tx.rollback()
return nil
}
func (tx *Tx) rollback() {
if tx.db == nil {
return
}
if tx.writable {
tx.db.freelist.rollback(tx.meta.txid)
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
}
tx.close()
}
func (tx *Tx) close() {
if tx.db == nil {
return
}
if tx.writable {
// Grab freelist stats.
var freelistFreeN = tx.db.freelist.free_count()
var freelistPendingN = tx.db.freelist.pending_count()
var freelistAlloc = tx.db.freelist.size()
// Remove transaction ref & writer lock.
tx.db.rwtx = nil
tx.db.rwlock.Unlock()
// Merge statistics.
tx.db.statlock.Lock()
tx.db.stats.FreePageN = freelistFreeN
tx.db.stats.PendingPageN = freelistPendingN
tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
tx.db.stats.FreelistInuse = freelistAlloc
tx.db.stats.TxStats.add(&tx.stats)
tx.db.statlock.Unlock()
} else {
tx.db.removeTx(tx)
}
// Clear all references.
tx.db = nil
tx.meta = nil
tx.root = Bucket{tx: tx}
tx.pages = nil
}
// Copy writes the entire database to a writer.
// This function exists for backwards compatibility. Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
_, err := tx.WriteTo(w)
return err
}
// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
// Attempt to open reader with WriteFlag
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
if err != nil {
return 0, err
}
defer func() {
if cerr := f.Close(); err == nil {
err = cerr
}
}()
// Generate a meta page. We use the same page data for both meta pages.
buf := make([]byte, tx.db.pageSize)
page := (*page)(unsafe.Pointer(&buf[0]))
page.flags = metaPageFlag
*page.meta() = *tx.meta
// Write meta 0.
page.id = 0
page.meta().checksum = page.meta().sum64()
nn, err := w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 0 copy: %s", err)
}
// Write meta 1 with a lower transaction id.
page.id = 1
page.meta().txid -= 1
page.meta().checksum = page.meta().sum64()
nn, err = w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 1 copy: %s", err)
}
// Move past the meta pages in the file.
if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
return n, fmt.Errorf("seek: %s", err)
}
// Copy data pages.
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
n += wn
if err != nil {
return n, err
}
return n, nil
}
// CopyFile copies the entire database to file at the given path.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return err
}
err = tx.Copy(f)
if err != nil {
_ = f.Close()
return err
}
return f.Close()
}
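// Example (editor's sketch, not part of the original file): a hot backup.
// The read transaction held by View keeps the copy consistent while other
// transactions keep running; "backup.db" is an arbitrary destination path.
func hotBackup(db *DB) error {
	return db.View(func(tx *Tx) error {
		return tx.CopyFile("backup.db", 0600)
	})
}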
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
ch := make(chan error)
go tx.check(ch)
return ch
}
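// Example (editor's sketch, not part of the original file): draining the
// Check channel from a read-only transaction and collecting every reported
// inconsistency instead of stopping at the first one. Per the note above,
// no writer transaction should run concurrently with this.
func checkDB(db *DB) []error {
	var errs []error
	_ = db.View(func(tx *Tx) error {
		for err := range tx.Check() {
			errs = append(errs, err)
		}
		return nil
	})
	return errs
}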
func (tx *Tx) check(ch chan error) {
// Force loading free list if opened in ReadOnly mode.
tx.db.loadFreelist()
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
freed[id] = true
}
// Track every reachable page.
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
if tx.meta.freelist != pgidNoFreelist {
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
}
}
// Recursively check buckets.
tx.checkBucket(&tx.root, reachable, freed, ch)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
_, isReachable := reachable[i]
if !isReachable && !freed[i] {
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
}
}
// Close the channel to signal completion.
close(ch)
}
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
// Ignore inline buckets.
if b.root == 0 {
return
}
// Check every page used by this bucket.
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
if p.id > tx.meta.pgid {
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
var id = p.id + i
if _, ok := reachable[id]; ok {
ch <- fmt.Errorf("page %d: multiple references", int(id))
}
reachable[id] = p
}
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
}
})
// Check each bucket within this bucket.
_ = b.ForEach(func(k, v []byte) error {
if child := b.Bucket(k); child != nil {
tx.checkBucket(child, reachable, freed, ch)
}
return nil
})
}
// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
p, err := tx.db.allocate(tx.meta.txid, count)
if err != nil {
return nil, err
}
// Save to our page cache.
tx.pages[p.id] = p
// Update statistics.
tx.stats.PageCount++
tx.stats.PageAlloc += count * tx.db.pageSize
return p, nil
}
// write writes any dirty pages to disk.
func (tx *Tx) write() error {
// Sort pages by id.
pages := make(pages, 0, len(tx.pages))
for _, p := range tx.pages {
pages = append(pages, p)
}
// Clear out page cache early.
tx.pages = make(map[pgid]*page)
sort.Sort(pages)
// Write pages to disk in order.
for _, p := range pages {
size := (int(p.overflow) + 1) * tx.db.pageSize
offset := int64(p.id) * int64(tx.db.pageSize)
// Write out page in "max allocation" sized chunks.
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
for {
// Limit our write to our max allocation size.
sz := size
if sz > maxAllocSize-1 {
sz = maxAllocSize - 1
}
// Write chunk to disk.
buf := ptr[:sz]
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
return err
}
// Update statistics.
tx.stats.Write++
// Exit inner for loop if we've written all the chunks.
size -= sz
if size == 0 {
break
}
// Otherwise move offset forward and move pointer to next chunk.
offset += int64(sz)
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
}
}
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
if err := fdatasync(tx.db); err != nil {
return err
}
}
// Put small pages back to page pool.
for _, p := range pages {
// Ignore page sizes over 1 page.
// These are allocated using make() instead of the page pool.
if int(p.overflow) != 0 {
continue
}
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {
buf[i] = 0
}
tx.db.pagePool.Put(buf)
}
return nil
}
// writeMeta writes the meta to the disk.
func (tx *Tx) writeMeta() error {
// Create a temporary buffer for the meta page.
buf := make([]byte, tx.db.pageSize)
p := tx.db.pageInBuffer(buf, 0)
tx.meta.write(p)
// Write the meta page to file.
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
return err
}
if !tx.db.NoSync || IgnoreNoSync {
if err := fdatasync(tx.db); err != nil {
return err
}
}
// Update statistics.
tx.stats.Write++
return nil
}
// page returns a reference to the page with a given id.
// If the page has been written to, a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first.
if tx.pages != nil {
if p, ok := tx.pages[id]; ok {
return p
}
}
// Otherwise return directly from the mmap.
return tx.db.page(id)
}
// forEachPage recursively visits every page reachable from the given page id and executes a function on each.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
p := tx.page(pgid)
// Execute function.
fn(p, depth)
// Recursively loop over children.
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
tx.forEachPage(elem.pgid, depth+1, fn)
}
}
}
// Page returns page information for a given page number.
// This is only safe for concurrent use when used by a writable transaction.
func (tx *Tx) Page(id int) (*PageInfo, error) {
if tx.db == nil {
return nil, ErrTxClosed
} else if pgid(id) >= tx.meta.pgid {
return nil, nil
}
// Build the page info.
p := tx.db.page(pgid(id))
info := &PageInfo{
ID: id,
Count: int(p.count),
OverflowCount: int(p.overflow),
}
// Determine the type (or if it's free).
if tx.db.freelist.freed(pgid(id)) {
info.Type = "free"
} else {
info.Type = p.typ()
}
return info, nil
}
// TxStats represents statistics about the actions performed by the transaction.
type TxStats struct {
// Page statistics.
PageCount int // number of page allocations
PageAlloc int // total bytes allocated
// Cursor statistics.
CursorCount int // number of cursors created
// Node statistics.
NodeCount int // number of node allocations
NodeDeref int // number of node dereferences
// Rebalance statistics.
Rebalance int // number of node rebalances
RebalanceTime time.Duration // total time spent rebalancing
// Split/Spill statistics.
Split int // number of nodes split
Spill int // number of nodes spilled
SpillTime time.Duration // total time spent spilling
// Write statistics.
Write int // number of writes performed
WriteTime time.Duration // total time spent writing to disk
}
func (s *TxStats) add(other *TxStats) {
s.PageCount += other.PageCount
s.PageAlloc += other.PageAlloc
s.CursorCount += other.CursorCount
s.NodeCount += other.NodeCount
s.NodeDeref += other.NodeDeref
s.Rebalance += other.Rebalance
s.RebalanceTime += other.RebalanceTime
s.Split += other.Split
s.Spill += other.Spill
s.SpillTime += other.SpillTime
s.Write += other.Write
s.WriteTime += other.WriteTime
}
// Sub calculates and returns the difference between two sets of transaction stats.
// This is useful when obtaining stats at two different points in time and
// you need the performance counters that occurred within that time span.
func (s *TxStats) Sub(other *TxStats) TxStats {
var diff TxStats
diff.PageCount = s.PageCount - other.PageCount
diff.PageAlloc = s.PageAlloc - other.PageAlloc
diff.CursorCount = s.CursorCount - other.CursorCount
diff.NodeCount = s.NodeCount - other.NodeCount
diff.NodeDeref = s.NodeDeref - other.NodeDeref
diff.Rebalance = s.Rebalance - other.Rebalance
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
diff.Split = s.Split - other.Split
diff.Spill = s.Spill - other.Spill
diff.SpillTime = s.SpillTime - other.SpillTime
diff.Write = s.Write - other.Write
diff.WriteTime = s.WriteTime - other.WriteTime
return diff
}
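// Example (editor's sketch, not part of the original file): sampling the
// write counter over an interval with Sub. DB.Stats() exposes the
// accumulated TxStats; d is an illustrative sampling window.
func writesPerInterval(db *DB, d time.Duration) int {
	prev := db.Stats()
	time.Sleep(d)
	cur := db.Stats()
	diff := cur.TxStats.Sub(&prev.TxStats)
	return diff.Write
}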

202
vendor/github.com/coreos/etcd/LICENSE generated vendored
View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

View file

@ -1,807 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: auth.proto
/*
Package authpb is a generated protocol buffer package.
It is generated from these files:
auth.proto
It has these top-level messages:
User
Permission
Role
*/
package authpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
_ "github.com/gogo/protobuf/gogoproto"
io "io"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Permission_Type int32
const (
READ Permission_Type = 0
WRITE Permission_Type = 1
READWRITE Permission_Type = 2
)
var Permission_Type_name = map[int32]string{
0: "READ",
1: "WRITE",
2: "READWRITE",
}
var Permission_Type_value = map[string]int32{
"READ": 0,
"WRITE": 1,
"READWRITE": 2,
}
func (x Permission_Type) String() string {
return proto.EnumName(Permission_Type_name, int32(x))
}
func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
// User is a single entry in the bucket authUsers
type User struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
}
func (m *User) Reset() { *m = User{} }
func (m *User) String() string { return proto.CompactTextString(m) }
func (*User) ProtoMessage() {}
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
// Permission is a single entity
type Permission struct {
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
}
func (m *Permission) Reset() { *m = Permission{} }
func (m *Permission) String() string { return proto.CompactTextString(m) }
func (*Permission) ProtoMessage() {}
func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
// Role is a single entry in the bucket authRoles
type Role struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
}
func (m *Role) Reset() { *m = Role{} }
func (m *Role) String() string { return proto.CompactTextString(m) }
func (*Role) ProtoMessage() {}
func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
func init() {
proto.RegisterType((*User)(nil), "authpb.User")
proto.RegisterType((*Permission)(nil), "authpb.Permission")
proto.RegisterType((*Role)(nil), "authpb.Role")
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
}
func (m *User) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *User) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.Password) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
i += copy(dAtA[i:], m.Password)
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
dAtA[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func (m *Permission) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PermType != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
}
if len(m.Key) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
if len(m.RangeEnd) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
i += copy(dAtA[i:], m.RangeEnd)
}
return i, nil
}
func (m *Role) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Role) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.KeyPermission) > 0 {
for _, msg := range m.KeyPermission {
dAtA[i] = 0x12
i++
i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *User) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.Password)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
l = len(s)
n += 1 + l + sovAuth(uint64(l))
}
}
return n
}
func (m *Permission) Size() (n int) {
var l int
_ = l
if m.PermType != 0 {
n += 1 + sovAuth(uint64(m.PermType))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.RangeEnd)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
return n
}
func (m *Role) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.KeyPermission) > 0 {
for _, e := range m.KeyPermission {
l = e.Size()
n += 1 + l + sovAuth(uint64(l))
}
}
return n
}
func sovAuth(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozAuth(x uint64) (n int) {
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *User) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: User: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
if m.Password == nil {
m.Password = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Permission) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Permission: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
}
m.PermType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.PermType |= (Permission_Type(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
if m.RangeEnd == nil {
m.RangeEnd = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Role) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Role: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.KeyPermission = append(m.KeyPermission, &Permission{})
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipAuth(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthAuth
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipAuth(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
}

View file

@ -1,233 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"fmt"
"strings"
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
type (
AuthEnableResponse pb.AuthEnableResponse
AuthDisableResponse pb.AuthDisableResponse
AuthenticateResponse pb.AuthenticateResponse
AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
AuthUserGetResponse pb.AuthUserGetResponse
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
AuthRoleAddResponse pb.AuthRoleAddResponse
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
AuthRoleGetResponse pb.AuthRoleGetResponse
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
AuthUserListResponse pb.AuthUserListResponse
AuthRoleListResponse pb.AuthRoleListResponse
PermissionType authpb.Permission_Type
Permission authpb.Permission
)
const (
PermRead = authpb.READ
PermWrite = authpb.WRITE
PermReadWrite = authpb.READWRITE
)
type Auth interface {
// AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
// AuthDisable disables auth of an etcd cluster.
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
// UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
// UserDelete deletes a user from an etcd cluster.
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
// UserChangePassword changes the password of a user.
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
// UserGrantRole grants a role to a user.
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
// UserGet gets detailed information about a user.
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
// UserList gets a list of all users.
UserList(ctx context.Context) (*AuthUserListResponse, error)
// UserRevokeRole revokes a role from a user.
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role to an etcd cluster.
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role.
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
// RoleGet gets detailed information about a role.
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
// RoleList gets a list of all roles.
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
// RoleRevokePermission revokes a permission from a role.
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role.
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}
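// Example (editor's sketch, not part of the original file): a typical
// bootstrap sequence against the Auth interface. The user, role, and
// half-open key range ["foo", "foo0") are illustrative; cli is an
// already-constructed *Client. Note the conversion to PermissionType,
// since PermRead is an authpb constant.
func exampleAuthBootstrap(cli *Client) error {
	ctx := context.Background()
	if _, err := cli.UserAdd(ctx, "alice", "secret"); err != nil {
		return err
	}
	if _, err := cli.RoleAdd(ctx, "reader"); err != nil {
		return err
	}
	if _, err := cli.RoleGrantPermission(ctx, "reader", "foo", "foo0", PermissionType(PermRead)); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "alice", "reader"); err != nil {
		return err
	}
	_, err := cli.AuthEnable(ctx)
	return err
}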
type auth struct {
remote pb.AuthClient
callOpts []grpc.CallOption
}
func NewAuth(c *Client) Auth {
api := &auth{remote: RetryAuthClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
perm := &authpb.Permission{
Key: []byte(key),
RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
func StrToPermissionType(s string) (PermissionType, error) {
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
if ok {
return PermissionType(val), nil
}
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}
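// Example (editor's sketch, not part of the original file): matching is
// case-insensitive, so a user-supplied "readwrite" parses cleanly:
//
//	pt, err := StrToPermissionType("readwrite")
//	// err == nil && pt == PermissionType(PermReadWrite)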
type authenticator struct {
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
callOpts []grpc.CallOption
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
func (auth *authenticator) close() {
auth.conn.Close()
}
func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return nil, err
}
api := &authenticator{
conn: conn,
remote: pb.NewAuthClient(conn),
}
if c != nil {
api.callOpts = c.callOpts
}
return api, nil
}

View file

@ -1,562 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
ErrOldCluster = errors.New("etcdclient: old cluster version")
)
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
KV
Lease
Watcher
Auth
Maintenance
conn *grpc.ClientConn
dialerrc chan error
cfg Config
creds *credentials.TransportCredentials
balancer *healthBalancer
mu *sync.Mutex
ctx context.Context
cancel context.CancelFunc
// Username is a user name for authentication.
Username string
// Password is a password for authentication.
Password string
// tokenCred is an instance of WithPerRPCCredentials()'s argument
tokenCred *authTokenCredential
callOpts []grpc.CallOption
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
// NewCtxClient creates a client with a context but no underlying grpc
// connection. This is useful for embedded cases that override the
// service interface implementations and do not need connection management.
func NewCtxClient(ctx context.Context) *Client {
cctx, cancel := context.WithCancel(ctx)
return &Client{ctx: cctx, cancel: cancel}
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
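// Example (editor's sketch, not part of the original file): constructing a
// client against a single endpoint and releasing its resources with Close.
// The endpoint address and timeout are illustrative.
func exampleNewClient() error {
	cli, err := New(Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return err
	}
	defer cli.Close()
	// ... use cli.KV, cli.Watcher, etc. ...
	return nil
}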
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
c.Watcher.Close()
c.Lease.Close()
if c.conn != nil {
return toErr(c.ctx, c.conn.Close())
}
return c.ctx.Err()
}
// Ctx is a context for "out of band" messages (e.g., for sending a
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() (eps []string) {
// copy the slice; protect original endpoints from being changed
eps = make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
return
}
// SetEndpoints updates the client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
c.cfg.Endpoints = eps
c.mu.Unlock()
c.balancer.updateAddrs(eps...)
// Updating notifyCh can trigger new connections; we need to update
// addrs if all connections are down or addrs does not include pinAddr.
c.balancer.mu.RLock()
update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
c.balancer.mu.RUnlock()
if update {
select {
case c.balancer.updateAddrsC <- notifyNext:
case <-c.balancer.stopc:
}
}
}
// Sync synchronizes the client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
mresp, err := c.MemberList(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range mresp.Members {
eps = append(eps, m.ClientURLs...)
}
c.SetEndpoints(eps...)
return nil
}
func (c *Client) autoSync() {
if c.cfg.AutoSyncInterval == time.Duration(0) {
return
}
for {
select {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
logger.Println("Auto sync endpoints failed:", err)
}
}
}
}
type authTokenCredential struct {
token string
tokenMu *sync.RWMutex
}
func (cred authTokenCredential) RequireTransportSecurity() bool {
return false
}
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
cred.tokenMu.RLock()
defer cred.tokenMu.RUnlock()
return map[string]string{
"token": cred.token,
}, nil
}
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
proto = "tcp"
host = endpoint
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return proto, host, scheme
}
scheme = url.Scheme
// strip scheme:// prefix since grpc dials by host
host = url.Host
switch url.Scheme {
case "http", "https":
case "unix", "unixs":
proto = "unix"
host = url.Host + url.Path
default:
proto, host = "", ""
}
return proto, host, scheme
}
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
creds = c.creds
switch scheme {
case "unix":
case "http":
creds = nil
case "https", "unixs":
if creds != nil {
break
}
tlsconfig := &tls.Config{}
emptyCreds := credentials.NewTLS(tlsconfig)
creds = &emptyCreds
default:
creds = nil
}
return creds
}
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
if c.cfg.DialTimeout > 0 {
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
}
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
Timeout: c.cfg.DialKeepAliveTimeout,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...)
f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
if host == "" && endpoint != "" {
// dialing an endpoint not in the balancer; use
// endpoint passed into dial
proto, host, _ = parseEndpoint(endpoint)
}
if proto == "" {
return nil, fmt.Errorf("unknown scheme for %q", host)
}
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
dialer := &net.Dialer{Timeout: t}
conn, err := dialer.DialContext(c.ctx, proto, host)
if err != nil {
select {
case c.dialerrc <- err:
default:
}
}
return conn, err
}
opts = append(opts, grpc.WithDialer(f))
creds := c.creds
if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
creds = c.processCreds(scheme)
}
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
return opts
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
return c.dial(endpoint)
}
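// Illustrative only (endpoint is a placeholder): Dial returns a raw
// *grpc.ClientConn tied to a single endpoint, bypassing the balancer.
//
//	conn, err := cli.Dial("127.0.0.1:2379")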
func (c *Client) getToken(ctx context.Context) error {
var err error // return the last error in case of failure
var auth *authenticator
for i := 0; i < len(c.cfg.Endpoints); i++ {
endpoint := c.cfg.Endpoints[i]
host := getHost(endpoint)
// use dial options without dopts to avoid reusing the client balancer
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
if err != nil {
continue
}
defer auth.close()
var resp *AuthenticateResponse
resp, err = auth.authenticate(ctx, c.Username, c.Password)
if err != nil {
continue
}
c.tokenCred.tokenMu.Lock()
c.tokenCred.token = resp.Token
c.tokenCred.tokenMu.Unlock()
return nil
}
return err
}
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts := c.dialSetupOpts(endpoint, dopts...)
host := getHost(endpoint)
if c.Username != "" && c.Password != "" {
c.tokenCred = &authTokenCredential{
tokenMu: &sync.RWMutex{},
}
ctx := c.ctx
if c.cfg.DialTimeout > 0 {
cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
defer cancel()
ctx = cctx
}
err := c.getToken(ctx)
if err != nil {
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = context.DeadlineExceeded
}
return nil, err
}
} else {
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
}
}
opts = append(opts, c.cfg.DialOptions...)
conn, err := grpc.DialContext(c.ctx, host, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, md)
}
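// A short usage sketch (illustrative; the key name is a placeholder): wrap a
// context so the request fails fast when the cluster has no leader, rather
// than blocking.
//
//	ctx := clientv3.WithRequireLeader(context.Background())
//	resp, err := cli.Get(ctx, "foo")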
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
}
var creds *credentials.TransportCredentials
if cfg.TLS != nil {
c := credentials.NewTLS(cfg.TLS)
creds = &c
}
// use a temporary skeleton client to bootstrap first connection
baseCtx := context.TODO()
if cfg.Context != nil {
baseCtx = cfg.Context
}
ctx, cancel := context.WithCancel(baseCtx)
client := &Client{
conn: nil,
dialerrc: make(chan error, 1),
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
mu: new(sync.Mutex),
callOpts: defaultCallOpts,
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
}
callOpts := []grpc.CallOption{
defaultFailFast,
defaultMaxCallSendMsgSize,
defaultMaxCallRecvMsgSize,
}
if cfg.MaxCallSendMsgSize > 0 {
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
}
if cfg.MaxCallRecvMsgSize > 0 {
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
}
client.callOpts = callOpts
}
client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
return grpcHealthCheck(client, ep)
})
// use Endpoints[0] so that, for https:// endpoints without any TLS config
// given, grpc will assume the certificate's server name is the endpoint host.
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
if err != nil {
client.cancel()
client.balancer.Close()
return nil, err
}
client.conn = conn
// wait for a connection
if cfg.DialTimeout > 0 {
hasConn := false
waitc := time.After(cfg.DialTimeout)
select {
case <-client.balancer.ready():
hasConn = true
case <-ctx.Done():
case <-waitc:
}
if !hasConn {
err := context.DeadlineExceeded
select {
case err = <-client.dialerrc:
default:
}
client.cancel()
client.balancer.Close()
conn.Close()
return nil, err
}
}
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
if cfg.RejectOldCluster {
if err := client.checkVersion(); err != nil {
client.Close()
return nil, err
}
}
go client.autoSync()
return client, nil
}
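// A configuration sketch (illustrative; all values are placeholders). The
// exported New constructor, defined elsewhere in this package, delegates to
// newClient:
//
//	cli, err := clientv3.New(clientv3.Config{
//		Endpoints:   []string{"https://etcd-1:2379", "https://etcd-2:2379"},
//		DialTimeout: 5 * time.Second,
//		Username:    "user", // with Password, enables the getToken auth flow above
//		Password:    "password",
//	})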
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
errc := make(chan error, len(c.cfg.Endpoints))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
}
wg.Add(len(c.cfg.Endpoints))
for _, ep := range c.cfg.Endpoints {
// if cluster is current, any endpoint gives a recent version
go func(e string) {
defer wg.Done()
resp, rerr := c.Status(ctx, e)
if rerr != nil {
errc <- rerr
return
}
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
maj, _ = strconv.Atoi(vs[0])
min, rerr = strconv.Atoi(vs[1])
}
if maj < 3 || (maj == 3 && min < 2) {
rerr = ErrOldCluster
}
errc <- rerr
}(ep)
}
// wait for success
for i := 0; i < len(c.cfg.Endpoints); i++ {
if err = <-errc; err == nil {
break
}
}
cancel()
wg.Wait()
return err
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return true
}
if err == nil {
return false
}
ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
ev, _ := status.FromError(err)
code := ev.Code()
switch code {
case codes.DeadlineExceeded:
fallthrough
case codes.Canceled:
if ctx.Err() != nil {
err = ctx.Err()
}
case codes.Unavailable:
case codes.FailedPrecondition:
err = grpc.ErrClientConnClosing
}
return err
}
func canceledByCaller(stopCtx context.Context, err error) bool {
if stopCtx.Err() == nil || err == nil {
return false
}
return err == context.Canceled || err == context.DeadlineExceeded
}


@ -1,114 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/types"
"google.golang.org/grpc"
)
type (
Member pb.Member
MemberListResponse pb.MemberListResponse
MemberAddResponse pb.MemberAddResponse
MemberRemoveResponse pb.MemberRemoveResponse
MemberUpdateResponse pb.MemberUpdateResponse
)
type Cluster interface {
// MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error)
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
// MemberRemove removes an existing member from the cluster.
MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
// MemberUpdate updates the peer addresses of the member.
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
}
type cluster struct {
remote pb.ClusterClient
callOpts []grpc.CallOption
}
func NewCluster(c *Client) Cluster {
api := &cluster{remote: RetryClusterClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
api := &cluster{remote: remote}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
// fail fast on malformed URLs before they cause a panic in rafthttp
if _, err := types.NewURLs(peerAddrs); err != nil {
return nil, err
}
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*MemberAddResponse)(resp), nil
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*MemberRemoveResponse)(resp), nil
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// fail fast on malformed URLs before they cause a panic in rafthttp
if _, err := types.NewURLs(peerAddrs); err != nil {
return nil, err
}
// it is safe to retry on update.
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
if err == nil {
return (*MemberListResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
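// Illustrative usage of the Cluster API (URLs are placeholders); note that
// MemberAdd takes peer URLs, not client URLs:
//
//	mresp, err := cli.MemberList(ctx)
//	if err == nil {
//		for _, m := range mresp.Members {
//			fmt.Println(m.Name, m.ClientURLs)
//		}
//	}
//	_, err = cli.MemberAdd(ctx, []string{"http://etcd-4:2380"})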


@ -1,51 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// CompactOp represents a compact operation.
type CompactOp struct {
revision int64
physical bool
}
// CompactOption configures compact operation.
type CompactOption func(*CompactOp)
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
for _, opt := range opts {
opt(op)
}
}
// OpCompact wraps a slice of CompactOptions to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
ret := CompactOp{revision: rev}
ret.applyCompactOpts(opts)
return ret
}
func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes Compact wait until all compacted entries are
// removed from the etcd server's storage.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
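// Illustrative usage (rev is a placeholder revision): compact the key space
// up to rev and wait until old revisions are physically removed from storage:
//
//	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())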


@ -1,140 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
type CompareTarget int
type CompareResult int
const (
CompareVersion CompareTarget = iota
CompareCreated
CompareModified
CompareValue
)
type Cmp pb.Compare
func Compare(cmp Cmp, result string, v interface{}) Cmp {
var r pb.Compare_CompareResult
switch result {
case "=":
r = pb.Compare_EQUAL
case "!=":
r = pb.Compare_NOT_EQUAL
case ">":
r = pb.Compare_GREATER
case "<":
r = pb.Compare_LESS
default:
panic("Unknown result op")
}
cmp.Result = r
switch cmp.Target {
case pb.Compare_VALUE:
val, ok := v.(string)
if !ok {
panic("bad compare value")
}
cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
case pb.Compare_VERSION:
cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
case pb.Compare_CREATE:
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
case pb.Compare_MOD:
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
case pb.Compare_LEASE:
cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
default:
panic("Unknown compare type")
}
return cmp
}
func Value(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
}
func Version(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
}
func CreateRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
}
func ModRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}
// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
// LeaseID is 0, otherwise known as `NoLease`.
func LeaseValue(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
}
// KeyBytes returns the byte slice holding the comparison key.
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
// WithKeyBytes sets the byte slice for the comparison key.
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
// ValueBytes returns the byte slice holding the comparison value, if any.
func (cmp *Cmp) ValueBytes() []byte {
if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
return tu.Value
}
return nil
}
// WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
// WithRange sets the comparison to scan the range [key, end).
func (cmp Cmp) WithRange(end string) Cmp {
cmp.RangeEnd = []byte(end)
return cmp
}
// WithPrefix sets the comparison to scan all keys prefixed by the key.
func (cmp Cmp) WithPrefix() Cmp {
cmp.RangeEnd = getPrefix(cmp.Key)
return cmp
}
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
}
if v, ok := val.(int); ok {
return int64(v)
}
panic("bad value")
}
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
// int64 otherwise.
func mustInt64orLeaseID(val interface{}) int64 {
if v, ok := val.(LeaseID); ok {
return int64(v)
}
return mustInt64(val)
}
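// An illustrative transaction built from these comparisons (key and value are
// placeholders): create "foo" only if it does not exist yet, mirroring the
// pattern used by Campaign and Lock in the concurrency package:
//
//	resp, err := cli.Txn(ctx).
//		If(clientv3.Compare(clientv3.CreateRevision("foo"), "=", 0)).
//		Then(clientv3.OpPut("foo", "bar")).
//		Else(clientv3.OpGet("foo")).
//		Commit()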


@ -1,17 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package concurrency implements concurrency operations on top of
// etcd such as distributed locks, barriers, and elections.
package concurrency


@ -1,245 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"errors"
"fmt"
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
)
var (
ErrElectionNotLeader = errors.New("election: not leader")
ErrElectionNoLeader = errors.New("election: no leader")
)
type Election struct {
session *Session
keyPrefix string
leaderKey string
leaderRev int64
leaderSession *Session
hdr *pb.ResponseHeader
}
// NewElection returns a new election on a given key prefix.
func NewElection(s *Session, pfx string) *Election {
return &Election{session: s, keyPrefix: pfx + "/"}
}
// ResumeElection initializes an election with a known leader.
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
return &Election{
	keyPrefix:     pfx, // retain the prefix so Leader and Observe work on the resumed election
	session:       s,
	leaderKey:     leaderKey,
	leaderRev:     leaderRev,
	leaderSession: s,
}
}
// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
s := e.session
client := e.session.Client()
k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
txn = txn.Else(v3.OpGet(k))
resp, err := txn.Commit()
if err != nil {
return err
}
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
if !resp.Succeeded {
kv := resp.Responses[0].GetResponseRange().Kvs[0]
e.leaderRev = kv.CreateRevision
if string(kv.Value) != val {
if err = e.Proclaim(ctx, val); err != nil {
e.Resign(ctx)
return err
}
}
}
_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
if err != nil {
// clean up in case of context cancel
select {
case <-ctx.Done():
e.Resign(client.Ctx())
default:
e.leaderSession = nil
}
return err
}
e.hdr = resp.Header
return nil
}
// Proclaim lets the leader announce a new value without another election.
func (e *Election) Proclaim(ctx context.Context, val string) error {
if e.leaderSession == nil {
return ErrElectionNotLeader
}
client := e.session.Client()
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
txn := client.Txn(ctx).If(cmp)
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
tresp, terr := txn.Commit()
if terr != nil {
return terr
}
if !tresp.Succeeded {
e.leaderKey = ""
return ErrElectionNotLeader
}
e.hdr = tresp.Header
return nil
}
// Resign lets a leader start a new election.
func (e *Election) Resign(ctx context.Context) (err error) {
if e.leaderSession == nil {
return nil
}
client := e.session.Client()
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
if err == nil {
e.hdr = resp.Header
}
e.leaderKey = ""
e.leaderSession = nil
return err
}
// Leader returns the leader value for the current election.
func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
client := e.session.Client()
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return nil, err
} else if len(resp.Kvs) == 0 {
// no leader currently elected
return nil, ErrElectionNoLeader
}
return resp, nil
}
// Observe returns a channel that reliably observes ordered leader proposals
// as GetResponse values on every current elected leader key. It will not
// necessarily fetch all historical leader updates, but will always post the
// most recent leader value.
//
// The channel closes when the context is canceled or the underlying watcher
// is otherwise disrupted.
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
retc := make(chan v3.GetResponse)
go e.observe(ctx, retc)
return retc
}
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
client := e.session.Client()
defer close(ch)
for {
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return
}
var kv *mvccpb.KeyValue
var hdr *pb.ResponseHeader
if len(resp.Kvs) == 0 {
cctx, cancel := context.WithCancel(ctx)
// wait for first key put on prefix
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
wch := client.Watch(cctx, e.keyPrefix, opts...)
for kv == nil {
wr, ok := <-wch
if !ok || wr.Err() != nil {
cancel()
return
}
// only accept puts; a delete will make observe() spin
for _, ev := range wr.Events {
if ev.Type == mvccpb.PUT {
hdr, kv = &wr.Header, ev.Kv
// the response may span multiple revisions and hdr.Revision is the
// last one; pin it to this kv's ModRevision in case the batch
// contains multiple puts
hdr.Revision = kv.ModRevision
break
}
}
}
cancel()
} else {
hdr, kv = resp.Header, resp.Kvs[0]
}
select {
case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
case <-ctx.Done():
return
}
cctx, cancel := context.WithCancel(ctx)
wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
keyDeleted := false
for !keyDeleted {
wr, ok := <-wch
if !ok {
cancel()
return
}
for _, ev := range wr.Events {
if ev.Type == mvccpb.DELETE {
keyDeleted = true
break
}
resp.Header = &wr.Header
resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
select {
case ch <- *resp:
case <-cctx.Done():
cancel()
return
}
}
}
cancel()
}
}
// Key returns the leader key if elected, empty string otherwise.
func (e *Election) Key() string { return e.leaderKey }
// Rev returns the leader key's creation revision, if elected.
func (e *Election) Rev() int64 { return e.leaderRev }
// Header is the response header from the last successful election proposal.
func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
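// A hedged usage sketch (prefix and value are placeholders; NewSession is
// defined in this package's session file):
//
//	s, err := concurrency.NewSession(cli)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer s.Close()
//	e := concurrency.NewElection(s, "/my-election")
//	if err := e.Campaign(ctx, "candidate-1"); err != nil {
//		log.Fatal(err)
//	}
//	// ... act as leader ...
//	_ = e.Resign(ctx)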


@ -1,65 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"fmt"
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
)
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
var wr v3.WatchResponse
wch := client.Watch(cctx, key, v3.WithRev(rev))
for wr = range wch {
for _, ev := range wr.Events {
if ev.Type == mvccpb.DELETE {
return nil
}
}
}
if err := wr.Err(); err != nil {
return err
}
if err := ctx.Err(); err != nil {
return err
}
return fmt.Errorf("lost watcher waiting for delete")
}
// waitDeletes efficiently waits until all keys matching the prefix with a
// create revision no greater than maxCreateRev have been deleted.
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
for {
resp, err := client.Get(ctx, pfx, getOpts...)
if err != nil {
return nil, err
}
if len(resp.Kvs) == 0 {
return resp.Header, nil
}
lastKey := string(resp.Kvs[0].Key)
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
return nil, err
}
}
}


@ -1,118 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"fmt"
"sync"
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// Mutex implements the sync Locker interface with etcd
type Mutex struct {
s *Session
pfx string
myKey string
myRev int64
hdr *pb.ResponseHeader
}
func NewMutex(s *Session, pfx string) *Mutex {
return &Mutex{s, pfx + "/", "", -1, nil}
}
// Lock locks the mutex with a cancelable context. If the context is canceled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
s := m.s
client := m.s.Client()
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
// put self in lock waiters via myKey; oldest waiter holds lock
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey)
// fetch current holder to complete uncontended path with only one RPC
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil {
return err
}
m.myRev = resp.Header.Revision
if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
}
// if there is no key under the prefix, or this key has the minimum create revision, the lock is already held
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}
// wait for keys with create revisions prior to myRev to be deleted
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
// release lock key if cancelled
select {
case <-ctx.Done():
m.Unlock(client.Ctx())
default:
m.hdr = hdr
}
return werr
}
func (m *Mutex) Unlock(ctx context.Context) error {
client := m.s.Client()
if _, err := client.Delete(ctx, m.myKey); err != nil {
return err
}
m.myKey = "\x00"
m.myRev = -1
return nil
}
func (m *Mutex) IsOwner() v3.Cmp {
return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
}
func (m *Mutex) Key() string { return m.myKey }
// Header is the response header received from etcd on acquiring the lock.
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
type lockerMutex struct{ *Mutex }
func (lm *lockerMutex) Lock() {
client := lm.s.Client()
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
panic(err)
}
}
func (lm *lockerMutex) Unlock() {
client := lm.s.Client()
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
panic(err)
}
}
// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(s *Session, pfx string) sync.Locker {
return &lockerMutex{NewMutex(s, pfx)}
}
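// A hedged usage sketch (prefix is a placeholder): a distributed critical
// section guarded by Mutex:
//
//	s, err := concurrency.NewSession(cli)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer s.Close()
//	m := concurrency.NewMutex(s, "/my-lock")
//	if err := m.Lock(ctx); err != nil {
//		log.Fatal(err)
//	}
//	// ... critical section ...
//	if err := m.Unlock(ctx); err != nil {
//		log.Fatal(err)
//	}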

Some files were not shown because too many files have changed in this diff.