Remove etcd v2

Gérald Croës 2018-08-07 19:12:03 +02:00 committed by Traefiker Bot
parent 9cd47dd2aa
commit 1ec4e03738
61 changed files with 49 additions and 60679 deletions

Gopkg.lock generated

@@ -152,7 +152,6 @@
     "store",
     "store/boltdb",
     "store/consul",
-    "store/etcd/v2",
     "store/etcd/v3",
     "store/zookeeper"
   ]
@@ -290,26 +289,16 @@
   name = "github.com/coreos/etcd"
   packages = [
     "auth/authpb",
-    "client",
     "clientv3",
     "clientv3/concurrency",
     "etcdserver/api/v3rpc/rpctypes",
     "etcdserver/etcdserverpb",
     "mvcc/mvccpb",
-    "pkg/pathutil",
-    "pkg/srv",
-    "pkg/types",
-    "version"
+    "pkg/types"
   ]
   revision = "70c8726202dd91e482fb4029fd14af1d4ed1d5af"
   version = "v3.3.5"
 
-[[projects]]
-  name = "github.com/coreos/go-semver"
-  packages = ["semver"]
-  revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
-  version = "v0.2.0"
-
 [[projects]]
   name = "github.com/coreos/go-systemd"
   packages = ["daemon"]
@@ -1238,12 +1227,6 @@
   revision = "ed3a127ec5fef7ae9ea95b01b542c47fbd999ce5"
   version = "v1.5.0"
 
-[[projects]]
-  name = "github.com/ugorji/go"
-  packages = ["codec"]
-  revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
-  version = "v1.1.1"
-
 [[projects]]
   name = "github.com/unrolled/render"
   packages = ["."]
@@ -1769,6 +1752,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "0b453fff40221eb9d779eeb4ad75fc076e8cbd93c1ecf058f4b9411d4afddf68"
+  inputs-digest = "8eb11befb583dfb89320dd7b7aea9e54b40e0523e60e66ed418788394b873d44"
   solver-name = "gps-cdcl"
   solver-version = 1


@@ -31,16 +31,6 @@ watch = true
 #
 prefix = "/traefik"
 
-# Force to use API V3 (otherwise still use API V2)
-#
-# Deprecated
-#
-# Optional
-# Default: false
-#
-useAPIV3 = true
-
 # Override default configuration template.
 # For advanced users :)
 #
@@ -69,7 +59,3 @@ useAPIV3 = true
 To enable constraints see [provider-specific constraints section](/configuration/commons/#provider-specific).
 
 Please refer to the [Key Value storage structure](/user-guide/kv-config/#key-value-storage-structure) section to get documentation on Traefik KV structure.
-
-!!! note
-    The option `useAPIV3` allows using Etcd API V3 only if it's set to true.
-    This option is **deprecated** and API V2 won't be supported in the future.
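For reference, a minimal sketch of the etcd provider block in `traefik.toml` once `useAPIV3` is gone; the endpoint value is illustrative, the other keys appear elsewhere in this commit:

```toml
# Enable the etcd provider; the v3 API is now the only supported one.
[etcd]
# Illustrative address; point this at your etcd v3 cluster.
endpoint = "127.0.0.1:2379"
watch = true
prefix = "/traefik"
```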


@@ -370,10 +370,6 @@ As a result, it may be possible for Træfik to read an intermediate configuratio
 To solve this problem, Træfik supports a special key called `/traefik/alias`.
 If set, Træfik use the value as an alternative key prefix.
 
-!!! note
-    The field `useAPIV3` allows using Etcd V3 API which should support updating multiple keys atomically with Etcd.
-    Etcd API V2 is deprecated and, in the future, Træfik will support API V3 by default.
-
 Given the key structure below, Træfik will use the `http://172.17.0.2:80` as its only backend (frontend keys have been omitted for brevity).
 
 | Key | Value |

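To illustrate the alias pattern described above, here is a minimal sketch using the same valkeyrie etcd v3 store that this commit relies on elsewhere; the `/traefik_configurations/2` prefix, the endpoint and the backend key name are illustrative, not taken from this commit:

```go
package main

import (
	"log"
	"time"

	"github.com/abronan/valkeyrie"
	"github.com/abronan/valkeyrie/store"
	etcdv3 "github.com/abronan/valkeyrie/store/etcd/v3"
)

func main() {
	// Register the etcd v3 backend with valkeyrie, as the Traefik provider now does.
	etcdv3.Register()

	// Illustrative endpoint; use your etcd v3 cluster address.
	kv, err := valkeyrie.NewStore(store.ETCDV3, []string{"127.0.0.1:2379"},
		&store.Config{ConnectionTimeout: 10 * time.Second})
	if err != nil {
		log.Fatal(err)
	}

	// Write the new configuration under a versioned prefix first...
	err = kv.Put("/traefik_configurations/2/backends/backend1/servers/server1/url",
		[]byte("http://172.17.0.2:80"), nil)
	if err != nil {
		log.Fatal(err)
	}

	// ...then flip the alias so Træfik switches to the new tree in a single write.
	if err := kv.Put("/traefik/alias", []byte("/traefik_configurations/2"), nil); err != nil {
		log.Fatal(err)
	}
}
```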

@@ -88,8 +88,7 @@ start_storeconfig_etcd3() {
 [etcd]
 endpoint = "10.0.1.12:2379"
 watch = true
-prefix = "/traefik"
-useAPIV3 = true' >> $basedir/traefik.toml
+prefix = "/traefik"' >> $basedir/traefik.toml
     up_environment storeconfig
     rm -f $basedir/traefik.toml
     waiting_counter=5
@@ -178,7 +177,7 @@ main() {
     case $2 in
         "--etcd3")
             echo "USE ETCD V3 AS KV STORE"
-            export TRAEFIK_CMD="--etcd --etcd.endpoint=10.0.1.12:2379 --etcd.useAPIV3=true"
+            export TRAEFIK_CMD="--etcd --etcd.endpoint=10.0.1.12:2379"
             start_boulder && \
             start_etcd3 && \
             start_storeconfig_etcd3 && \


@@ -13,32 +13,54 @@ import (
     "github.com/abronan/valkeyrie/store/etcd/v3"
     "github.com/containous/traefik/integration/try"
     "github.com/go-check/check"
     checker "github.com/vdemeester/shakers"
 )
 
 const (
-    // Services IP addresses fixed in the configuration
-    ipEtcd     = "172.18.0.2"
-    ipWhoami01 = "172.18.0.3"
-    ipWhoami02 = "172.18.0.4"
-    ipWhoami03 = "172.18.0.5"
-    ipWhoami04 = "172.18.0.6"
     traefikEtcdURL    = "http://127.0.0.1:8000/"
     traefikWebEtcdURL = "http://127.0.0.1:8081/"
 )
 
+var (
+    ipEtcd     string
+    ipWhoami01 string
+    ipWhoami02 string
+    ipWhoami03 string
+    ipWhoami04 string
+)
+
 // Etcd test suites (using libcompose)
 type Etcd3Suite struct {
     BaseSuite
     kv store.Store
 }
 
-func (s *Etcd3Suite) SetUpTest(c *check.C) {
+func (s *Etcd3Suite) getIPAddress(c *check.C, service, defaultIP string) string {
+    var ip string
+    for _, value := range s.composeProject.Container(c, service).NetworkSettings.Networks {
+        if len(value.IPAddress) > 0 {
+            ip = value.IPAddress
+            break
+        }
+    }
+
+    if len(ip) == 0 {
+        return defaultIP
+    }
+
+    return ip
+}
+
+func (s *Etcd3Suite) SetUpSuite(c *check.C) {
     s.createComposeProject(c, "etcd3")
     s.composeProject.Start(c)
 
+    ipEtcd = s.getIPAddress(c, "etcd", "172.18.0.2")
+    ipWhoami01 = s.getIPAddress(c, "whoami1", "172.18.0.3")
+    ipWhoami02 = s.getIPAddress(c, "whoami2", "172.18.0.4")
+    ipWhoami03 = s.getIPAddress(c, "whoami3", "172.18.0.5")
+    ipWhoami04 = s.getIPAddress(c, "whoami4", "172.18.0.6")
+
     etcdv3.Register()
     url := ipEtcd + ":2379"
     kv, err := valkeyrie.NewStore(
@@ -49,7 +71,7 @@ func (s *Etcd3Suite) SetUpTest(c *check.C) {
         },
     )
     if err != nil {
-        c.Fatal("Cannot create store etcd")
+        c.Fatalf("Cannot create store etcd %v", err)
     }
     s.kv = kv
 
@@ -62,21 +84,22 @@ func (s *Etcd3Suite) SetUpTest(c *check.C) {
 }
 
 func (s *Etcd3Suite) TearDownTest(c *check.C) {
+    // Delete all Traefik keys from ETCD
+    s.kv.DeleteTree("/traefik")
+}
+
+func (s *Etcd3Suite) TearDownSuite(c *check.C) {
     // shutdown and delete compose project
     if s.composeProject != nil {
         s.composeProject.Stop(c)
     }
 }
 
-func (s *Etcd3Suite) TearDownSuite(c *check.C) {}
-
 func (s *Etcd3Suite) TestSimpleConfiguration(c *check.C) {
     file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
         EtcdHost string
-        UseAPIV3 bool
     }{
         ipEtcd,
-        true,
     })
     defer os.Remove(file)
@@ -95,10 +118,8 @@ func (s *Etcd3Suite) TestSimpleConfiguration(c *check.C) {
 func (s *Etcd3Suite) TestNominalConfiguration(c *check.C) {
     file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
         EtcdHost string
-        UseAPIV3 bool
     }{
         ipEtcd,
-        true,
     })
     defer os.Remove(file)
@@ -219,8 +240,7 @@ func (s *Etcd3Suite) TestGlobalConfiguration(c *check.C) {
     cmd, display := s.traefikCmd(
         withConfigFile("fixtures/simple_web.toml"),
         "--etcd",
-        "--etcd.endpoint="+ipEtcd+":4001",
-        "--etcd.useAPIV3=true")
+        "--etcd.endpoint="+ipEtcd+":4001")
     defer display(c)
     err = cmd.Start()
     c.Assert(err, checker.IsNil)
@@ -294,8 +314,7 @@ func (s *Etcd3Suite) TestCertificatesContentWithSNIConfigHandshake(c *check.C) {
     cmd, display := s.traefikCmd(
         withConfigFile("fixtures/simple_web.toml"),
         "--etcd",
-        "--etcd.endpoint="+ipEtcd+":4001",
-        "--etcd.useAPIV3=true")
+        "--etcd.endpoint="+ipEtcd+":4001")
     defer display(c)
 
     // Copy the contents of the certificate files into ETCD
@@ -397,8 +416,7 @@ func (s *Etcd3Suite) TestCommandStoreConfig(c *check.C) {
     cmd, display := s.traefikCmd(
         "storeconfig",
         withConfigFile("fixtures/simple_web.toml"),
-        "--etcd.endpoint="+ipEtcd+":4001",
-        "--etcd.useAPIV3=true")
+        "--etcd.endpoint="+ipEtcd+":4001")
     defer display(c)
 
     err := cmd.Start()
     c.Assert(err, checker.IsNil)
@@ -433,8 +451,7 @@ func (s *Etcd3Suite) TestSNIDynamicTlsConfig(c *check.C) {
     cmd, display := s.traefikCmd(
         withConfigFile("fixtures/etcd/simple_https.toml"),
         "--etcd",
-        "--etcd.endpoint="+ipEtcd+":4001",
-        "--etcd.useAPIV3=true")
+        "--etcd.endpoint="+ipEtcd+":4001")
     defer display(c)
 
     snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert")
@@ -571,8 +588,7 @@ func (s *Etcd3Suite) TestDeleteSNIDynamicTlsConfig(c *check.C) {
     cmd, display := s.traefikCmd(
         withConfigFile("fixtures/etcd/simple_https.toml"),
         "--etcd",
-        "--etcd.endpoint="+ipEtcd+":4001",
-        "--etcd.useAPIV3=true")
+        "--etcd.endpoint="+ipEtcd+":4001")
     defer display(c)
 
     // prepare to config


@@ -1,583 +0,0 @@
package integration
import (
"crypto/tls"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
"github.com/abronan/valkeyrie/store/etcd/v2"
"github.com/containous/traefik/integration/try"
"github.com/go-check/check"
checker "github.com/vdemeester/shakers"
)
// Etcd test suites (using libcompose)
type EtcdSuite struct {
BaseSuite
kv store.Store
}
func (s *EtcdSuite) SetUpTest(c *check.C) {
s.createComposeProject(c, "etcd")
s.composeProject.Start(c)
etcd.Register()
url := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress + ":2379"
kv, err := valkeyrie.NewStore(
store.ETCD,
[]string{url},
&store.Config{
ConnectionTimeout: 10 * time.Second,
},
)
if err != nil {
c.Fatal("Cannot create store etcd")
}
s.kv = kv
// wait for etcd
err = try.Do(60*time.Second, func() error {
_, err := kv.Exists("test", nil)
return err
})
c.Assert(err, checker.IsNil)
}
func (s *EtcdSuite) TearDownTest(c *check.C) {
// shutdown and delete compose project
if s.composeProject != nil {
s.composeProject.Stop(c)
}
}
func (s *EtcdSuite) TearDownSuite(c *check.C) {}
func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
EtcdHost string
UseAPIV3 bool
}{
etcdHost,
false,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
// TODO validate : run on 80
// Expected a 404 as we did not configure anything
err = try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
}
func (s *EtcdSuite) TestNominalConfiguration(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct {
EtcdHost string
UseAPIV3 bool
}{
etcdHost,
false,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress
whoami3IP := s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress
whoami4IP := s.composeProject.Container(c, "whoami4").NetworkSettings.IPAddress
backend1 := map[string]string{
"/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5",
"/traefik/backends/backend1/servers/server1/url": "http://" + whoami1IP + ":80",
"/traefik/backends/backend1/servers/server1/weight": "10",
"/traefik/backends/backend1/servers/server2/url": "http://" + whoami2IP + ":80",
"/traefik/backends/backend1/servers/server2/weight": "1",
}
backend2 := map[string]string{
"/traefik/backends/backend2/loadbalancer/method": "drr",
"/traefik/backends/backend2/servers/server1/url": "http://" + whoami3IP + ":80",
"/traefik/backends/backend2/servers/server1/weight": "1",
"/traefik/backends/backend2/servers/server2/url": "http://" + whoami4IP + ":80",
"/traefik/backends/backend2/servers/server2/weight": "2",
}
frontend1 := map[string]string{
"/traefik/frontends/frontend1/backend": "backend2",
"/traefik/frontends/frontend1/entrypoints": "http",
"/traefik/frontends/frontend1/priority": "1",
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost",
}
frontend2 := map[string]string{
"/traefik/frontends/frontend2/backend": "backend1",
"/traefik/frontends/frontend2/entrypoints": "http",
"/traefik/frontends/frontend2/priority": "10",
"/traefik/frontends/frontend2/routes/test_2/rule": "Path:/test",
}
for key, value := range backend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range backend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
// wait for etcd
err = try.Do(60*time.Second, func() error {
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil)
return err
})
c.Assert(err, checker.IsNil)
// wait for Træfik
err = try.GetRequest("http://127.0.0.1:8081/api/providers", 60*time.Second, try.BodyContains("Path:/test"))
c.Assert(err, checker.IsNil)
client := &http.Client{}
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
req.Host = "test.localhost"
response, err := client.Do(req)
c.Assert(err, checker.IsNil)
c.Assert(response.StatusCode, checker.Equals, http.StatusOK)
body, err := ioutil.ReadAll(response.Body)
c.Assert(err, checker.IsNil)
if !strings.Contains(string(body), whoami3IP) &&
!strings.Contains(string(body), whoami4IP) {
c.Fail()
}
req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/test", nil)
c.Assert(err, checker.IsNil)
response, err = client.Do(req)
c.Assert(err, checker.IsNil)
c.Assert(response.StatusCode, checker.Equals, http.StatusOK)
body, err = ioutil.ReadAll(response.Body)
c.Assert(err, checker.IsNil)
if !strings.Contains(string(body), whoami1IP) &&
!strings.Contains(string(body), whoami2IP) {
c.Fail()
}
req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/test2", nil)
c.Assert(err, checker.IsNil)
req.Host = "test2.localhost"
resp, err := client.Do(req)
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
resp, err = http.Get("http://127.0.0.1:8000/")
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
}
func (s *EtcdSuite) TestGlobalConfiguration(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
err := s.kv.Put("/traefik/entrypoints/http/address", []byte(":8001"), nil)
c.Assert(err, checker.IsNil)
// wait for etcd
err = try.Do(60*time.Second, func() error {
_, err := s.kv.Exists("/traefik/entrypoints/http/address", nil)
return err
})
c.Assert(err, checker.IsNil)
// start Træfik
cmd, display := s.traefikCmd(
withConfigFile("fixtures/simple_web.toml"),
"--etcd",
"--etcd.endpoint="+etcdHost+":4001")
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress
whoami3IP := s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress
whoami4IP := s.composeProject.Container(c, "whoami4").NetworkSettings.IPAddress
backend1 := map[string]string{
"/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5",
"/traefik/backends/backend1/servers/server1/url": "http://" + whoami1IP + ":80",
"/traefik/backends/backend1/servers/server1/weight": "10",
"/traefik/backends/backend1/servers/server2/url": "http://" + whoami2IP + ":80",
"/traefik/backends/backend1/servers/server2/weight": "1",
}
backend2 := map[string]string{
"/traefik/backends/backend2/loadbalancer/method": "drr",
"/traefik/backends/backend2/servers/server1/url": "http://" + whoami3IP + ":80",
"/traefik/backends/backend2/servers/server1/weight": "1",
"/traefik/backends/backend2/servers/server2/url": "http://" + whoami4IP + ":80",
"/traefik/backends/backend2/servers/server2/weight": "2",
}
frontend1 := map[string]string{
"/traefik/frontends/frontend1/backend": "backend2",
"/traefik/frontends/frontend1/entrypoints": "http",
"/traefik/frontends/frontend1/priority": "1",
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost",
}
frontend2 := map[string]string{
"/traefik/frontends/frontend2/backend": "backend1",
"/traefik/frontends/frontend2/entrypoints": "http",
"/traefik/frontends/frontend2/priority": "10",
"/traefik/frontends/frontend2/routes/test_2/rule": "Path:/test",
}
for key, value := range backend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range backend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
// wait for etcd
err = try.Do(60*time.Second, func() error {
_, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil)
return err
})
c.Assert(err, checker.IsNil)
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Path:/test"))
c.Assert(err, checker.IsNil)
// check
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8001/", nil)
c.Assert(err, checker.IsNil)
req.Host = "test.localhost"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
}
func (s *EtcdSuite) TestCertificatesContentWithSNIConfigHandshake(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
// start Træfik
cmd, display := s.traefikCmd(
withConfigFile("fixtures/simple_web.toml"),
"--etcd",
"--etcd.endpoint="+etcdHost+":4001")
defer display(c)
whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress
whoami3IP := s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress
whoami4IP := s.composeProject.Container(c, "whoami4").NetworkSettings.IPAddress
// Copy the contents of the certificate files into ETCD
snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert")
c.Assert(err, checker.IsNil)
snitestComKey, err := ioutil.ReadFile("fixtures/https/snitest.com.key")
c.Assert(err, checker.IsNil)
snitestOrgCert, err := ioutil.ReadFile("fixtures/https/snitest.org.cert")
c.Assert(err, checker.IsNil)
snitestOrgKey, err := ioutil.ReadFile("fixtures/https/snitest.org.key")
c.Assert(err, checker.IsNil)
globalConfig := map[string]string{
"/traefik/entrypoints/https/address": ":4443",
"/traefik/entrypoints/https/tls/certificates/0/certfile": string(snitestComCert),
"/traefik/entrypoints/https/tls/certificates/0/keyfile": string(snitestComKey),
"/traefik/entrypoints/https/tls/certificates/1/certfile": string(snitestOrgCert),
"/traefik/entrypoints/https/tls/certificates/1/keyfile": string(snitestOrgKey),
"/traefik/defaultentrypoints/0": "https",
}
backend1 := map[string]string{
"/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5",
"/traefik/backends/backend1/servers/server1/url": "http://" + whoami1IP + ":80",
"/traefik/backends/backend1/servers/server1/weight": "10",
"/traefik/backends/backend1/servers/server2/url": "http://" + whoami2IP + ":80",
"/traefik/backends/backend1/servers/server2/weight": "1",
}
backend2 := map[string]string{
"/traefik/backends/backend2/loadbalancer/method": "drr",
"/traefik/backends/backend2/servers/server1/url": "http://" + whoami3IP + ":80",
"/traefik/backends/backend2/servers/server1/weight": "1",
"/traefik/backends/backend2/servers/server2/url": "http://" + whoami4IP + ":80",
"/traefik/backends/backend2/servers/server2/weight": "2",
}
frontend1 := map[string]string{
"/traefik/frontends/frontend1/backend": "backend2",
"/traefik/frontends/frontend1/entrypoints": "http",
"/traefik/frontends/frontend1/priority": "1",
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:snitest.com",
}
frontend2 := map[string]string{
"/traefik/frontends/frontend2/backend": "backend1",
"/traefik/frontends/frontend2/entrypoints": "http",
"/traefik/frontends/frontend2/priority": "10",
"/traefik/frontends/frontend2/routes/test_2/rule": "Host:snitest.org",
}
for key, value := range globalConfig {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range backend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range backend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
// wait for etcd
err = try.Do(60*time.Second, try.KVExists(s.kv, "/traefik/frontends/frontend2/routes/test_2/rule"))
c.Assert(err, checker.IsNil)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Host:snitest.org"))
c.Assert(err, checker.IsNil)
// check
tlsConfig := &tls.Config{
InsecureSkipVerify: true,
ServerName: "snitest.com",
}
conn, err := tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
c.Assert(err, checker.IsNil, check.Commentf("failed to connect to server"))
defer conn.Close()
err = conn.Handshake()
c.Assert(err, checker.IsNil, check.Commentf("TLS handshake error"))
cs := conn.ConnectionState()
err = cs.PeerCertificates[0].VerifyHostname("snitest.com")
c.Assert(err, checker.IsNil, check.Commentf("certificate did not match SNI servername"))
}
func (s *EtcdSuite) TestCommandStoreConfig(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
cmd, display := s.traefikCmd(
"storeconfig",
withConfigFile("fixtures/simple_web.toml"),
"--etcd.endpoint="+etcdHost+":4001")
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
// wait for traefik finish without error
err = cmd.Wait()
c.Assert(err, checker.IsNil)
// CHECK
checkmap := map[string]string{
"/traefik/loglevel": "DEBUG",
"/traefik/defaultentrypoints/0": "http",
"/traefik/entrypoints/http/address": ":8000",
"/traefik/api/entrypoint": "traefik",
"/traefik/etcd/endpoint": etcdHost + ":4001",
}
for key, value := range checkmap {
var p *store.KVPair
err = try.Do(60*time.Second, func() error {
p, err = s.kv.Get(key, nil)
return err
})
c.Assert(err, checker.IsNil)
c.Assert(string(p.Value), checker.Equals, value)
}
}
func (s *EtcdSuite) TestSNIDynamicTlsConfig(c *check.C) {
etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress
// start Træfik
cmd, display := s.traefikCmd(
withConfigFile("fixtures/etcd/simple_https.toml"),
"--etcd",
"--etcd.endpoint="+etcdHost+":4001",
"--etcd.watch=true",
)
defer display(c)
// prepare to config
whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress
whoami3IP := s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress
whoami4IP := s.composeProject.Container(c, "whoami4").NetworkSettings.IPAddress
snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert")
c.Assert(err, checker.IsNil)
snitestComKey, err := ioutil.ReadFile("fixtures/https/snitest.com.key")
c.Assert(err, checker.IsNil)
snitestOrgCert, err := ioutil.ReadFile("fixtures/https/snitest.org.cert")
c.Assert(err, checker.IsNil)
snitestOrgKey, err := ioutil.ReadFile("fixtures/https/snitest.org.key")
c.Assert(err, checker.IsNil)
backend1 := map[string]string{
"/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5",
"/traefik/backends/backend1/servers/server1/url": "http://" + whoami1IP + ":80",
"/traefik/backends/backend1/servers/server1/weight": "1",
"/traefik/backends/backend1/servers/server2/url": "http://" + whoami2IP + ":80",
"/traefik/backends/backend1/servers/server2/weight": "1",
}
backend2 := map[string]string{
"/traefik/backends/backend2/loadbalancer/method": "drr",
"/traefik/backends/backend2/servers/server1/url": "http://" + whoami3IP + ":80",
"/traefik/backends/backend2/servers/server1/weight": "1",
"/traefik/backends/backend2/servers/server2/url": "http://" + whoami4IP + ":80",
"/traefik/backends/backend2/servers/server2/weight": "1",
}
frontend1 := map[string]string{
"/traefik/frontends/frontend1/backend": "backend2",
"/traefik/frontends/frontend1/entrypoints": "https",
"/traefik/frontends/frontend1/priority": "1",
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:snitest.com",
}
frontend2 := map[string]string{
"/traefik/frontends/frontend2/backend": "backend1",
"/traefik/frontends/frontend2/entrypoints": "https",
"/traefik/frontends/frontend2/priority": "10",
"/traefik/frontends/frontend2/routes/test_2/rule": "Host:snitest.org",
}
tlsconfigure1 := map[string]string{
"/traefik/tls/snitestcom/entrypoints": "https",
"/traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey),
"/traefik/tls/snitestcom/certificate/certfile": string(snitestComCert),
}
tlsconfigure2 := map[string]string{
"/traefik/tls/snitestorg/entrypoints": "https",
"/traefik/tls/snitestorg/certificate/keyfile": string(snitestOrgKey),
"/traefik/tls/snitestorg/certificate/certfile": string(snitestOrgCert),
}
// config backends,frontends and first tls keypair
for key, value := range backend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range backend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range frontend2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
for key, value := range tlsconfigure1 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
tr1 := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
ServerName: "snitest.com",
},
}
tr2 := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
ServerName: "snitest.org",
},
}
// wait for etcd
err = try.Do(60*time.Second, func() error {
_, err := s.kv.Get("/traefik/tls/snitestcom/certificate/keyfile", nil)
return err
})
c.Assert(err, checker.IsNil)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil)
c.Assert(err, checker.IsNil)
req.Host = tr1.TLSClientConfig.ServerName
req.Header.Set("Host", tr1.TLSClientConfig.ServerName)
req.Header.Set("Accept", "*/*")
err = try.RequestWithTransport(req, 30*time.Second, tr1, try.HasCn(tr1.TLSClientConfig.ServerName))
c.Assert(err, checker.IsNil)
// now we configure the second keypair in etcd and the request for host "snitest.org" will use the second keypair
for key, value := range tlsconfigure2 {
err := s.kv.Put(key, []byte(value), nil)
c.Assert(err, checker.IsNil)
}
// wait for etcd
err = try.Do(60*time.Second, func() error {
_, err := s.kv.Get("/traefik/tls/snitestorg/certificate/keyfile", nil)
return err
})
c.Assert(err, checker.IsNil)
req, err = http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil)
c.Assert(err, checker.IsNil)
req.Host = tr2.TLSClientConfig.ServerName
req.Header.Set("Host", tr2.TLSClientConfig.ServerName)
req.Header.Set("Accept", "*/*")
err = try.RequestWithTransport(req, 30*time.Second, tr2, try.HasCn(tr2.TLSClientConfig.ServerName))
c.Assert(err, checker.IsNil)
}


@@ -13,7 +13,6 @@ logLevel = "DEBUG"
 endpoint = "{{.EtcdHost}}:2379"
 prefix = "/traefik"
 watch = true
-useAPIV3 = {{.UseAPIV3}}
 
 [api]
 entryPoint = "api"


@@ -45,7 +45,6 @@ func init() {
     check.Suite(&DockerComposeSuite{})
     check.Suite(&DockerSuite{})
     check.Suite(&DynamoDBSuite{})
-    check.Suite(&EtcdSuite{})
     check.Suite(&ErrorPagesSuite{})
     check.Suite(&EurekaSuite{})
     check.Suite(&FileSuite{})


@@ -1,14 +0,0 @@
etcd:
  image: containous/docker-etcd

whoami1:
  image: emilevauge/whoami

whoami2:
  image: emilevauge/whoami

whoami3:
  image: emilevauge/whoami

whoami4:
  image: emilevauge/whoami


@@ -4,59 +4,30 @@ services:
   etcd:
     image: quay.io/coreos/etcd:v3.2.9
-    command: /usr/local/bin/etcd --data-dir=/etcd-data --name node1 --initial-advertise-peer-urls http://172.18.0.2:2380 --listen-peer-urls http://172.18.0.2:2380 --advertise-client-urls http://172.18.0.2:2379,http://172.18.0.2:4001 --listen-client-urls http://172.18.0.2:2379,http://172.18.0.2:4001 --initial-cluster node1=http://172.18.0.2:2380 --debug
+    command: /usr/local/bin/etcd --data-dir=/etcd-data --name node1 --initial-advertise-peer-urls http://etcd:2380 --listen-peer-urls http://0.0.0.0:2380 --advertise-client-urls http://etcd:2379,http://etcd:4001 --listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 --initial-cluster node1=http://etcd:2380 --debug
     expose:
       - 2380
       - 2379
       - 4001
       - 7001
-#    networks:
-#      etcd_net:
-#        ipv4_address: 172.10.1.2
 
   whoami1:
     image: emilevauge/whoami
-    # depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
-    # Remove it ASAP
     depends_on:
       - etcd
-#    networks:
-#      etcd_net:
-#        ipv4_address: 172.10.1.3
 
   whoami2:
     image: emilevauge/whoami
-    # depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
-    # Remove it ASAP
     depends_on:
       - whoami1
-#    networks:
-#      etcd_net:
-#        ipv4_address: 172.10.1.4
 
   whoami3:
     image: emilevauge/whoami
-    # depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
-    # Remove it ASAP
     depends_on:
       - whoami2
-#    networks:
-#      etcd_net:
-#        ipv4_address: 172.10.1.5
 
   whoami4:
     image: emilevauge/whoami
-    # depends_on option activate because libcompose (used by libkermit) does not support fix IP yet...
-    # Remove it ASAP
     depends_on:
       - whoami3
-#    networks:
-#      etcd_net:
-#        ipv4_address: 172.10.1.6
-
-#networks:
-#  etcd_net:
-#    driver: bridge
-#    ipam:
-#      config:
-#        - subnet: 172.10.1.0/28


@@ -4,9 +4,7 @@ import (
     "fmt"
 
     "github.com/abronan/valkeyrie/store"
-    "github.com/abronan/valkeyrie/store/etcd/v2"
     "github.com/abronan/valkeyrie/store/etcd/v3"
-    "github.com/containous/traefik/log"
     "github.com/containous/traefik/provider"
     "github.com/containous/traefik/provider/kv"
     "github.com/containous/traefik/safe"
@@ -18,7 +16,6 @@ var _ provider.Provider = (*Provider)(nil)
 // Provider holds configurations of the provider.
 type Provider struct {
     kv.Provider `mapstructure:",squash" export:"true"`
-    UseAPIV3    bool `description:"Use ETCD API V3" export:"true"`
 }
 
 // Init the provider
@@ -45,14 +42,7 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
 
 // CreateStore creates the KV store
 func (p *Provider) CreateStore() (store.Store, error) {
-    if p.UseAPIV3 {
-        etcdv3.Register()
-        p.SetStoreType(store.ETCDV3)
-    } else {
-        // TODO: Deprecated
-        log.Warn("The ETCD API V2 is deprecated. Please use API V3 instead")
-        etcd.Register()
-        p.SetStoreType(store.ETCD)
-    }
+    etcdv3.Register()
+    p.SetStoreType(store.ETCDV3)
     return p.Provider.CreateStore()
 }
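A short, assumed usage sketch of the simplified provider: with the v2 branch gone, `CreateStore` always registers the v3 backend, so calling code no longer branches on `UseAPIV3`. The field names on the embedded `kv.Provider` (`Endpoint`, `Prefix`) are assumptions taken from the surrounding Traefik 1.x code base, not from this diff:

```go
package main

import (
	"log"

	"github.com/containous/traefik/provider/etcd"
)

func main() {
	// Sketch, not the actual Traefik wiring.
	p := &etcd.Provider{}
	p.Endpoint = "127.0.0.1:2379" // illustrative etcd v3 address
	p.Prefix = "/traefik"

	// With the v2 branch removed, CreateStore always registers store.ETCDV3.
	kvStore, err := p.CreateStore()
	if err != nil {
		log.Fatalf("cannot create etcd store: %v", err)
	}
	_ = kvStore
}
```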


@@ -1,657 +0,0 @@
package etcd
import (
"crypto/tls"
"errors"
"log"
"net"
"net/http"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
etcd "github.com/coreos/etcd/client"
)
const (
lockSuffix = "___lock"
)
var (
// ErrAbortTryLock is thrown when a user stops trying to seek the lock
// by sending a signal to the stop chan, this is used to verify if the
// operation succeeded
ErrAbortTryLock = errors.New("lock operation aborted")
)
// Etcd is the receiver type for the
// Store interface
type Etcd struct {
client etcd.KeysAPI
}
type etcdLock struct {
lock sync.Mutex
client etcd.KeysAPI
stopLock chan struct{}
stopRenew chan struct{}
mutexKey string
writeKey string
value string
last *etcd.Response
ttl time.Duration
}
const (
defaultLockTTL = 20 * time.Second
defaultUpdateTime = 5 * time.Second
)
// Register registers etcd to valkeyrie
func Register() {
valkeyrie.AddStore(store.ETCD, New)
}
// New creates a new Etcd client given a list
// of endpoints and an optional tls config
func New(addrs []string, options *store.Config) (store.Store, error) {
s := &Etcd{}
var (
entries []string
err error
)
entries = store.CreateEndpoints(addrs, "http")
cfg := &etcd.Config{
Endpoints: entries,
Transport: etcd.DefaultTransport,
HeaderTimeoutPerRequest: 3 * time.Second,
}
// Set options
if options != nil {
if options.TLS != nil {
setTLS(cfg, options.TLS, addrs)
}
if options.ConnectionTimeout != 0 {
setTimeout(cfg, options.ConnectionTimeout)
}
if options.Username != "" {
setCredentials(cfg, options.Username, options.Password)
}
}
c, err := etcd.New(*cfg)
if err != nil {
log.Fatal(err)
}
s.client = etcd.NewKeysAPI(c)
// Periodic Cluster Sync
if options != nil && options.SyncPeriod != 0 {
go func() {
for {
c.AutoSync(context.Background(), options.SyncPeriod)
}
}()
}
return s, nil
}
// SetTLS sets the tls configuration given a tls.Config scheme
func setTLS(cfg *etcd.Config, tls *tls.Config, addrs []string) {
entries := store.CreateEndpoints(addrs, "https")
cfg.Endpoints = entries
// Set transport
t := http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tls,
}
cfg.Transport = &t
}
// setTimeout sets the timeout used for connecting to the store
func setTimeout(cfg *etcd.Config, time time.Duration) {
cfg.HeaderTimeoutPerRequest = time
}
// setCredentials sets the username/password credentials for connecting to Etcd
func setCredentials(cfg *etcd.Config, username, password string) {
cfg.Username = username
cfg.Password = password
}
// Normalize the key for usage in Etcd
func (s *Etcd) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimPrefix(key, "/")
}
// keyNotFound checks on the error returned by the KeysAPI
// to verify if the key exists in the store or not
func keyNotFound(err error) bool {
if err != nil {
if etcdError, ok := err.(etcd.Error); ok {
if etcdError.Code == etcd.ErrorCodeKeyNotFound ||
etcdError.Code == etcd.ErrorCodeNotFile ||
etcdError.Code == etcd.ErrorCodeNotDir {
return true
}
}
}
return false
}
// Get the value at "key", returns the last modified
// index to use in conjunction to Atomic calls
func (s *Etcd) Get(key string, opts *store.ReadOptions) (pair *store.KVPair, err error) {
getOpts := &etcd.GetOptions{
Quorum: true,
}
// Get options
if opts != nil {
getOpts.Quorum = opts.Consistent
}
result, err := s.client.Get(context.Background(), s.normalize(key), getOpts)
if err != nil {
if keyNotFound(err) {
return nil, store.ErrKeyNotFound
}
return nil, err
}
pair = &store.KVPair{
Key: key,
Value: []byte(result.Node.Value),
LastIndex: result.Node.ModifiedIndex,
}
return pair, nil
}
// Put a value at "key"
func (s *Etcd) Put(key string, value []byte, opts *store.WriteOptions) error {
setOpts := &etcd.SetOptions{}
// Set options
if opts != nil {
setOpts.Dir = opts.IsDir
setOpts.TTL = opts.TTL
}
_, err := s.client.Set(context.Background(), s.normalize(key), string(value), setOpts)
return err
}
// Delete a value at "key"
func (s *Etcd) Delete(key string) error {
opts := &etcd.DeleteOptions{
Recursive: false,
}
_, err := s.client.Delete(context.Background(), s.normalize(key), opts)
if keyNotFound(err) {
return store.ErrKeyNotFound
}
return err
}
// Exists checks if the key exists inside the store
func (s *Etcd) Exists(key string, opts *store.ReadOptions) (bool, error) {
_, err := s.Get(key, opts)
if err != nil {
if err == store.ErrKeyNotFound {
return false, nil
}
return false, err
}
return true, nil
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Etcd) Watch(key string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan *store.KVPair, error) {
wopts := &etcd.WatcherOptions{Recursive: false}
watcher := s.client.Watcher(s.normalize(key), wopts)
// watchCh is sending back events to the caller
watchCh := make(chan *store.KVPair)
// Get the current value
pair, err := s.Get(key, opts)
if err != nil {
return nil, err
}
go func() {
defer close(watchCh)
// Push the current value through the channel.
watchCh <- pair
for {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
result, err := watcher.Next(context.Background())
if err != nil {
return
}
watchCh <- &store.KVPair{
Key: key,
Value: []byte(result.Node.Value),
LastIndex: result.Node.ModifiedIndex,
}
}
}()
return watchCh, nil
}
// WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass
// on errors. Upon creating a watch, the current childs values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}, opts *store.ReadOptions) (<-chan []*store.KVPair, error) {
watchOpts := &etcd.WatcherOptions{Recursive: true}
watcher := s.client.Watcher(s.normalize(directory), watchOpts)
// watchCh is sending back events to the caller
watchCh := make(chan []*store.KVPair)
// List current children
list, err := s.List(directory, opts)
if err != nil {
return nil, err
}
go func() {
defer close(watchCh)
// Push the current value through the channel.
watchCh <- list
for {
// Check if the watch was stopped by the caller
select {
case <-stopCh:
return
default:
}
_, err := watcher.Next(context.Background())
if err != nil {
return
}
list, err = s.List(directory, opts)
if err != nil {
return
}
watchCh <- list
}
}()
return watchCh, nil
}
// AtomicPut puts a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Etcd) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
var (
meta *etcd.Response
err error
)
setOpts := &etcd.SetOptions{}
if previous != nil {
setOpts.PrevExist = etcd.PrevExist
setOpts.PrevIndex = previous.LastIndex
if previous.Value != nil {
setOpts.PrevValue = string(previous.Value)
}
} else {
setOpts.PrevExist = etcd.PrevNoExist
}
if opts != nil {
if opts.TTL > 0 {
setOpts.TTL = opts.TTL
}
}
meta, err = s.client.Set(context.Background(), s.normalize(key), string(value), setOpts)
if err != nil {
if etcdError, ok := err.(etcd.Error); ok {
// Compare failed
if etcdError.Code == etcd.ErrorCodeTestFailed {
return false, nil, store.ErrKeyModified
}
// Node exists error (when PrevNoExist)
if etcdError.Code == etcd.ErrorCodeNodeExist {
return false, nil, store.ErrKeyExists
}
}
return false, nil, err
}
updated := &store.KVPair{
Key: key,
Value: value,
LastIndex: meta.Node.ModifiedIndex,
}
return true, updated, nil
}
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
delOpts := &etcd.DeleteOptions{}
if previous != nil {
delOpts.PrevIndex = previous.LastIndex
if previous.Value != nil {
delOpts.PrevValue = string(previous.Value)
}
}
_, err := s.client.Delete(context.Background(), s.normalize(key), delOpts)
if err != nil {
if etcdError, ok := err.(etcd.Error); ok {
// Key Not Found
if etcdError.Code == etcd.ErrorCodeKeyNotFound {
return false, store.ErrKeyNotFound
}
// Compare failed
if etcdError.Code == etcd.ErrorCodeTestFailed {
return false, store.ErrKeyModified
}
}
return false, err
}
return true, nil
}
// List child nodes of a given directory
func (s *Etcd) List(directory string, opts *store.ReadOptions) ([]*store.KVPair, error) {
getOpts := &etcd.GetOptions{
Quorum: true,
Recursive: true,
Sort: true,
}
// Get options
if opts != nil {
getOpts.Quorum = opts.Consistent
}
resp, err := s.client.Get(context.Background(), s.normalize(directory), getOpts)
if err != nil {
if keyNotFound(err) {
return nil, store.ErrKeyNotFound
}
return nil, err
}
kv := []*store.KVPair{}
for _, n := range resp.Node.Nodes {
if n.Key == directory {
continue
}
// Etcd v2 seems to stop listing child keys at directories even
// with the "Recursive" option. If the child is a directory,
// we call `List` recursively to go through the whole set.
if n.Dir {
pairs, err := s.List(n.Key, opts)
if err != nil {
return nil, err
}
kv = append(kv, pairs...)
}
// Filter out etcd mutex side keys with `___lock` suffix
if strings.Contains(string(n.Key), lockSuffix) {
continue
}
kv = append(kv, &store.KVPair{
Key: n.Key,
Value: []byte(n.Value),
LastIndex: n.ModifiedIndex,
})
}
return kv, nil
}
// DeleteTree deletes a range of keys under a given directory
func (s *Etcd) DeleteTree(directory string) error {
delOpts := &etcd.DeleteOptions{
Recursive: true,
}
_, err := s.client.Delete(context.Background(), s.normalize(directory), delOpts)
if keyNotFound(err) {
return store.ErrKeyNotFound
}
return err
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
var value string
ttl := defaultLockTTL
renewCh := make(chan struct{})
// Apply options on Lock
if options != nil {
if options.Value != nil {
value = string(options.Value)
}
if options.TTL != 0 {
ttl = options.TTL
}
if options.RenewLock != nil {
renewCh = options.RenewLock
}
}
// Create lock object
lock = &etcdLock{
client: s.client,
stopRenew: renewCh,
mutexKey: s.normalize(key + lockSuffix),
writeKey: s.normalize(key),
value: value,
ttl: ttl,
}
return lock, nil
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
l.lock.Lock()
defer l.lock.Unlock()
// Lock holder channel
lockHeld := make(chan struct{})
stopLocking := l.stopRenew
setOpts := &etcd.SetOptions{
TTL: l.ttl,
}
for {
setOpts.PrevExist = etcd.PrevNoExist
resp, err := l.client.Set(context.Background(), l.mutexKey, "", setOpts)
if err != nil {
if etcdError, ok := err.(etcd.Error); ok {
if etcdError.Code != etcd.ErrorCodeNodeExist {
return nil, err
}
setOpts.PrevIndex = ^uint64(0)
}
} else {
setOpts.PrevIndex = resp.Node.ModifiedIndex
}
setOpts.PrevExist = etcd.PrevExist
l.last, err = l.client.Set(context.Background(), l.mutexKey, "", setOpts)
if err == nil {
// Leader section
l.stopLock = stopLocking
go l.holdLock(l.mutexKey, lockHeld, stopLocking)
// We are holding the lock, set the write key
_, err = l.client.Set(context.Background(), l.writeKey, l.value, nil)
if err != nil {
return nil, err
}
break
} else {
// If this is a legitimate error, return
if etcdError, ok := err.(etcd.Error); ok {
if etcdError.Code != etcd.ErrorCodeTestFailed {
return nil, err
}
}
// Seeker section
errorCh := make(chan error)
chWStop := make(chan bool)
free := make(chan bool)
go l.waitLock(l.mutexKey, errorCh, chWStop, free)
// Wait for the key to be available or for
// a signal to stop trying to lock the key
select {
case <-free:
break
case err := <-errorCh:
return nil, err
case <-stopChan:
return nil, ErrAbortTryLock
}
// Delete or Expire event occurred
// Retry
}
}
return lockHeld, nil
}
// Hold the lock as long as we can
// Updates the key ttl periodically until we receive
// an explicit stop signal from the Unlock method
func (l *etcdLock) holdLock(key string, lockHeld chan struct{}, stopLocking <-chan struct{}) {
defer close(lockHeld)
update := time.NewTicker(l.ttl / 3)
defer update.Stop()
var err error
setOpts := &etcd.SetOptions{TTL: l.ttl}
for {
select {
case <-update.C:
setOpts.PrevIndex = l.last.Node.ModifiedIndex
l.last, err = l.client.Set(context.Background(), key, "", setOpts)
if err != nil {
return
}
case <-stopLocking:
return
}
}
}
// WaitLock simply waits for the key to be available for creation
func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan bool, free chan<- bool) {
opts := &etcd.WatcherOptions{Recursive: false}
watcher := l.client.Watcher(key, opts)
for {
event, err := watcher.Next(context.Background())
if err != nil {
errorCh <- err
return
}
if event.Action == "delete" || event.Action == "compareAndDelete" || event.Action == "expire" {
free <- true
return
}
}
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *etcdLock) Unlock() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.stopLock != nil {
l.stopLock <- struct{}{}
}
if l.last != nil {
delOpts := &etcd.DeleteOptions{
PrevIndex: l.last.Node.ModifiedIndex,
}
_, err := l.client.Delete(context.Background(), l.mutexKey, delOpts)
if err != nil {
return err
}
}
return nil
}
// Close closes the client connection
func (s *Etcd) Close() {
return
}


@@ -1,236 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/url"
)
type Role struct {
Role string `json:"role"`
Permissions Permissions `json:"permissions"`
Grant *Permissions `json:"grant,omitempty"`
Revoke *Permissions `json:"revoke,omitempty"`
}
type Permissions struct {
KV rwPermission `json:"kv"`
}
type rwPermission struct {
Read []string `json:"read"`
Write []string `json:"write"`
}
type PermissionType int
const (
ReadPermission PermissionType = iota
WritePermission
ReadWritePermission
)
// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
// interact with etcd's role creation and modification features.
func NewAuthRoleAPI(c Client) AuthRoleAPI {
return &httpAuthRoleAPI{
client: c,
}
}
type AuthRoleAPI interface {
// AddRole adds a role.
AddRole(ctx context.Context, role string) error
// RemoveRole removes a role.
RemoveRole(ctx context.Context, role string) error
// GetRole retrieves role details.
GetRole(ctx context.Context, role string) (*Role, error)
// GrantRoleKV grants a role some permission prefixes for the KV store.
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// ListRoles lists roles.
ListRoles(ctx context.Context) ([]string, error)
}
type httpAuthRoleAPI struct {
client httpClient
}
type authRoleAPIAction struct {
verb string
name string
role *Role
}
type authRoleAPIList struct{}
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", l.name)
if l.role == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.role)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var roleList struct {
Roles []Role `json:"roles"`
}
if err = json.Unmarshal(body, &roleList); err != nil {
return nil, err
}
ret := make([]string, 0, len(roleList.Roles))
for _, r := range roleList.Roles {
ret = append(ret, r.Role)
}
return ret, nil
}
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
role := &Role{
Role: rolename,
}
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "DELETE",
name: rolename,
})
}
func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
return r.modRole(ctx, &authRoleAPIAction{
verb: "GET",
name: rolename,
})
}
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
var out rwPermission
switch permType {
case ReadPermission:
out.Read = prefixes
case WritePermission:
out.Write = prefixes
case ReadWritePermission:
out.Read = prefixes
out.Write = prefixes
}
return out
}
func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Grant: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Revoke: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
if err = json.Unmarshal(body, &role); err != nil {
return nil, err
}
return &role, nil
}


@@ -1,319 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/url"
"path"
)
var (
defaultV2AuthPrefix = "/v2/auth"
)
type User struct {
User string `json:"user"`
Password string `json:"password,omitempty"`
Roles []string `json:"roles"`
Grant []string `json:"grant,omitempty"`
Revoke []string `json:"revoke,omitempty"`
}
// userListEntry is the user representation given by the server for ListUsers
type userListEntry struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
type UserRoles struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
if name != "" {
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
return &ep
}
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
return &ep
}
// NewAuthAPI constructs a new AuthAPI that uses HTTP to
// interact with etcd's general auth features.
func NewAuthAPI(c Client) AuthAPI {
return &httpAuthAPI{
client: c,
}
}
type AuthAPI interface {
// Enable auth.
Enable(ctx context.Context) error
// Disable auth.
Disable(ctx context.Context) error
}
type httpAuthAPI struct {
client httpClient
}
func (s *httpAuthAPI) Enable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"PUT"})
}
func (s *httpAuthAPI) Disable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"DELETE"})
}
func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
resp, body, err := s.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
type authAPIAction struct {
verb string
}
func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "enable", "")
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
type authError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e authError) Error() string {
return e.Message
}
// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
// interact with etcd's user creation and modification features.
func NewAuthUserAPI(c Client) AuthUserAPI {
return &httpAuthUserAPI{
client: c,
}
}
type AuthUserAPI interface {
// AddUser adds a user.
AddUser(ctx context.Context, username string, password string) error
// RemoveUser removes a user.
RemoveUser(ctx context.Context, username string) error
// GetUser retrieves user details.
GetUser(ctx context.Context, username string) (*User, error)
// GrantUser grants a user some permission roles.
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
// RevokeUser revokes some permission roles from a user.
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
// ChangePassword changes the user's password.
ChangePassword(ctx context.Context, username string, password string) (*User, error)
// ListUsers lists the users.
ListUsers(ctx context.Context) ([]string, error)
}
type httpAuthUserAPI struct {
client httpClient
}
type authUserAPIAction struct {
verb string
username string
user *User
}
type authUserAPIList struct{}
func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", l.username)
if l.user == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.user)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
resp, body, err := u.client.Do(ctx, &authUserAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var userList struct {
Users []userListEntry `json:"users"`
}
if err = json.Unmarshal(body, &userList); err != nil {
return nil, err
}
ret := make([]string, 0, len(userList.Users))
for _, u := range userList.Users {
ret = append(ret, u.User)
}
return ret, nil
}
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
user := &User{
User: username,
Password: password,
}
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "DELETE",
username: username,
})
}
func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
return u.modUser(ctx, &authUserAPIAction{
verb: "GET",
username: username,
})
}
func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Grant: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Revoke: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
user := &User{
User: username,
Password: password,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var user User
if err = json.Unmarshal(body, &user); err != nil {
var userR UserRoles
if urerr := json.Unmarshal(body, &userR); urerr != nil {
return nil, err
}
user.User = userR.User
for _, r := range userR.Roles {
user.Roles = append(user.Roles, r.Role)
}
}
return &user, nil
}
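// Illustrative usage sketch, not part of the original vendored file: creating a
// user and granting it an existing role through AuthUserAPI. The user name,
// password, and role name below are placeholders.
func exampleGrantUser(ctx context.Context, c Client) error {
	uAPI := NewAuthUserAPI(c)
	if err := uAPI.AddUser(ctx, "example-user", "example-password"); err != nil {
		return err
	}
	// Grant a pre-existing role to the freshly created user.
	_, err := uAPI.GrantUser(ctx, "example-user", []string{"example-role"})
	return err
}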

View file

@ -1,18 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// borrowed from golang/net/context/ctxhttp/cancelreq.go
package client
import "net/http"
func requestCanceler(tr CancelableTransport, req *http.Request) func() {
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

View file

@ -1,710 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"sort"
"strconv"
"sync"
"time"
"github.com/coreos/etcd/version"
)
var (
ErrNoEndpoints = errors.New("client: no endpoints available")
ErrTooManyRedirects = errors.New("client: too many redirects")
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
)
var DefaultRequestTimeout = 5 * time.Second
var DefaultTransport CancelableTransport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
type EndpointSelectionMode int
const (
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
// As the name implies, the client object will pick a node from the members
// of the cluster in a random fashion. If the cluster has three members, A, B,
// and C, the client picks any node from its three members as its request
// destination.
EndpointSelectionRandom EndpointSelectionMode = iota
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
// requests are sent directly to the cluster leader. This reduces
// forwarding roundtrips compared to making requests to etcd followers
// who then forward them to the cluster leader. In the event of a leader
// failure, however, clients configured this way cannot prioritize among
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
// maintain its knowledge of current cluster state.
//
// This mode should be used with Client.AutoSync().
EndpointSelectionPrioritizeLeader
)
type Config struct {
// Endpoints defines a set of URLs (schemes, hosts and ports only)
// that can be used to communicate with a logical etcd cluster. For
// example, a three-node cluster could be provided like so:
//
// Endpoints: []string{
// "http://node1.example.com:2379",
// "http://node2.example.com:2379",
// "http://node3.example.com:2379",
// }
//
// If multiple endpoints are provided, the Client will attempt to
// use them all in the event that one or more of them are unusable.
//
// If Client.Sync is ever called, the Client may cache an alternate
// set of endpoints to continue operation.
Endpoints []string
// Transport is used by the Client to drive HTTP requests. If not
// provided, DefaultTransport will be used.
Transport CancelableTransport
// CheckRedirect specifies the policy for handling HTTP redirects.
// If CheckRedirect is not nil, the Client calls it before
// following an HTTP redirect. The sole argument is the number of
// requests that have already been made. If CheckRedirect returns
// an error, Client.Do will not make any further requests and will return
// the error back to the caller.
//
// If CheckRedirect is nil, the Client uses its default policy,
// which is to stop after 10 consecutive requests.
CheckRedirect CheckRedirectFunc
// Username specifies the user credential to add as an authorization header
Username string
// Password is the password for the specified user to add as an authorization header
// to the request.
Password string
// HeaderTimeoutPerRequest specifies the time limit to wait for response
// header in a single request made by the Client. The timeout includes
// connection time, any redirects, and header wait time.
//
// For non-watch GET request, server returns the response body immediately.
// For PUT/POST/DELETE request, server will attempt to commit request
// before responding, which is expected to take `100ms + 2 * RTT`.
// For watch request, server returns the header immediately to notify Client
// watch start. But if server is behind some kind of proxy, the response
// header may be cached at proxy, and Client cannot rely on this behavior.
//
// Especially, wait request will ignore this timeout.
//
// One API call may send multiple requests to different etcd servers until it
// succeeds. Use context of the API to specify the overall timeout.
//
// A HeaderTimeoutPerRequest of zero means no timeout.
HeaderTimeoutPerRequest time.Duration
// SelectionMode is an EndpointSelectionMode enum that specifies the
// policy for choosing the etcd cluster node to which requests are sent.
SelectionMode EndpointSelectionMode
}
func (cfg *Config) transport() CancelableTransport {
if cfg.Transport == nil {
return DefaultTransport
}
return cfg.Transport
}
func (cfg *Config) checkRedirect() CheckRedirectFunc {
if cfg.CheckRedirect == nil {
return DefaultCheckRedirect
}
return cfg.CheckRedirect
}
// CancelableTransport mimics net/http.Transport, but requires that
// the object also support request cancellation.
type CancelableTransport interface {
http.RoundTripper
CancelRequest(req *http.Request)
}
type CheckRedirectFunc func(via int) error
// DefaultCheckRedirect follows up to 10 redirects, but no more.
var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
if via > 10 {
return ErrTooManyRedirects
}
return nil
}
type Client interface {
// Sync updates the internal cache of the etcd cluster's membership.
Sync(context.Context) error
// AutoSync periodically calls Sync() every given interval.
// The recommended sync interval is 10 seconds to 1 minute: it does
// not add much overhead to the server and lets the client catch up with
// cluster changes in time.
//
// The example to use it:
//
// for {
// err := client.AutoSync(ctx, 10*time.Second)
// if err == context.DeadlineExceeded || err == context.Canceled {
// break
// }
// log.Print(err)
// }
AutoSync(context.Context, time.Duration) error
// Endpoints returns a copy of the current set of API endpoints used
// by Client to resolve HTTP requests. If Sync has ever been called,
// this may differ from the initial Endpoints provided in the Config.
Endpoints() []string
// SetEndpoints sets the set of API endpoints used by Client to resolve
// HTTP requests. If the given endpoints are not valid, an error will be
// returned
SetEndpoints(eps []string) error
// GetVersion retrieves the current etcd server and cluster version
GetVersion(ctx context.Context) (*version.Versions, error)
httpClient
}
func New(cfg Config) (Client, error) {
c := &httpClusterClient{
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
selectionMode: cfg.SelectionMode,
}
if cfg.Username != "" {
c.credentials = &credentials{
username: cfg.Username,
password: cfg.Password,
}
}
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
return nil, err
}
return c, nil
}
type httpClient interface {
Do(context.Context, httpAction) (*http.Response, []byte, error)
}
func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
return func(ep url.URL) httpClient {
return &redirectFollowingHTTPClient{
checkRedirect: cr,
client: &simpleHTTPClient{
transport: tr,
endpoint: ep,
headerTimeout: headerTimeout,
},
}
}
}
type credentials struct {
username string
password string
}
type httpClientFactory func(url.URL) httpClient
type httpAction interface {
HTTPRequest(url.URL) *http.Request
}
type httpClusterClient struct {
clientFactory httpClientFactory
endpoints []url.URL
pinned int
credentials *credentials
sync.RWMutex
rand *rand.Rand
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
ceps := make([]url.URL, len(eps))
copy(ceps, eps)
// To perform a lookup on the new endpoint list without using the current
// client, we'll copy it
clientCopy := &httpClusterClient{
clientFactory: c.clientFactory,
credentials: c.credentials,
rand: c.rand,
pinned: 0,
endpoints: ceps,
}
mAPI := NewMembersAPI(clientCopy)
leader, err := mAPI.Leader(ctx)
if err != nil {
return "", err
}
if len(leader.ClientURLs) == 0 {
return "", ErrNoLeaderEndpoint
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
if len(eps) == 0 {
return []url.URL{}, ErrNoEndpoints
}
neps := make([]url.URL, len(eps))
for i, ep := range eps {
u, err := url.Parse(ep)
if err != nil {
return []url.URL{}, err
}
neps[i] = *u
}
return neps, nil
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
c.endpoints = shuffleEndpoints(c.rand, neps)
// We're not doing anything for PrioritizeLeader here: without a context
// we can't call getLeaderEndpoint. However, if you're using
// PrioritizeLeader you've already been told to call Sync regularly,
// where we do have a ctx and can determine the leader. PrioritizeLeader
// is also quite a loose guarantee, so deal with it.
c.pinned = 0
return nil
}
func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
action := act
c.RLock()
leps := len(c.endpoints)
eps := make([]url.URL, leps)
n := copy(eps, c.endpoints)
pinned := c.pinned
if c.credentials != nil {
action = &authedAction{
act: act,
credentials: *c.credentials,
}
}
c.RUnlock()
if leps == 0 {
return nil, nil, ErrNoEndpoints
}
if leps != n {
return nil, nil, errors.New("unable to pick endpoint: copy failed")
}
var resp *http.Response
var body []byte
var err error
cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ {
k := i % leps
hc := c.clientFactory(eps[k])
resp, body, err = hc.Do(ctx, action)
if err != nil {
cerr.Errors = append(cerr.Errors, err)
if err == ctx.Err() {
return nil, nil, ctx.Err()
}
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
} else if resp.StatusCode/100 == 5 {
switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
err = cerr.Errors[0]
}
if err != nil {
if !isOneShot {
continue
}
c.Lock()
c.pinned = (k + 1) % leps
c.Unlock()
return nil, nil, err
}
if k != pinned {
c.Lock()
c.pinned = k
c.Unlock()
}
return resp, body, nil
}
return nil, nil, cerr
}
func (c *httpClusterClient) Endpoints() []string {
c.RLock()
defer c.RUnlock()
eps := make([]string, len(c.endpoints))
for i, ep := range c.endpoints {
eps[i] = ep.String()
}
return eps
}
func (c *httpClusterClient) Sync(ctx context.Context) error {
mAPI := NewMembersAPI(c)
ms, err := mAPI.List(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range ms {
eps = append(eps, m.ClientURLs...)
}
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
}
npin := 0
switch c.selectionMode {
case EndpointSelectionRandom:
c.RLock()
eq := endpointsEqual(c.endpoints, neps)
c.RUnlock()
if eq {
return nil
}
// When items in the endpoint list change, we choose a new pin
neps = shuffleEndpoints(c.rand, neps)
case EndpointSelectionPrioritizeLeader:
nle, err := c.getLeaderEndpoint(ctx, neps)
if err != nil {
return ErrNoLeaderEndpoint
}
for i, n := range neps {
if n.String() == nle {
npin = i
break
}
}
default:
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
}
c.Lock()
defer c.Unlock()
c.endpoints = neps
c.pinned = npin
return nil
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
err := c.Sync(ctx)
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
}
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
act := &getAction{Prefix: "/version"}
resp, body, err := c.Do(ctx, act)
if err != nil {
return nil, err
}
switch resp.StatusCode {
case http.StatusOK:
if len(body) == 0 {
return nil, ErrEmptyBody
}
var vresp version.Versions
if err := json.Unmarshal(body, &vresp); err != nil {
return nil, ErrInvalidJSON
}
return &vresp, nil
default:
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return nil, ErrInvalidJSON
}
return nil, etcdErr
}
}
type roundTripResponse struct {
resp *http.Response
err error
}
type simpleHTTPClient struct {
transport CancelableTransport
endpoint url.URL
headerTimeout time.Duration
}
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
req := act.HTTPRequest(c.endpoint)
if err := printcURL(req); err != nil {
return nil, nil, err
}
isWait := false
if req != nil && req.URL != nil {
ws := req.URL.Query().Get("wait")
if len(ws) != 0 {
var err error
isWait, err = strconv.ParseBool(ws)
if err != nil {
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
}
}
}
var hctx context.Context
var hcancel context.CancelFunc
if !isWait && c.headerTimeout > 0 {
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
} else {
hctx, hcancel = context.WithCancel(ctx)
}
defer hcancel()
reqcancel := requestCanceler(c.transport, req)
rtchan := make(chan roundTripResponse, 1)
go func() {
resp, err := c.transport.RoundTrip(req)
rtchan <- roundTripResponse{resp: resp, err: err}
close(rtchan)
}()
var resp *http.Response
var err error
select {
case rtresp := <-rtchan:
resp, err = rtresp.resp, rtresp.err
case <-hctx.Done():
// cancel and wait for request to actually exit before continuing
reqcancel()
rtresp := <-rtchan
resp = rtresp.resp
switch {
case ctx.Err() != nil:
err = ctx.Err()
case hctx.Err() != nil:
err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
default:
panic("failed to get error from context")
}
}
// always check for resp nil-ness to deal with possible
// race conditions between channels above
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if err != nil {
return nil, nil, err
}
var body []byte
done := make(chan struct{})
go func() {
body, err = ioutil.ReadAll(resp.Body)
done <- struct{}{}
}()
select {
case <-ctx.Done():
resp.Body.Close()
<-done
return nil, nil, ctx.Err()
case <-done:
}
return resp, body, err
}
type authedAction struct {
act httpAction
credentials credentials
}
func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
r := a.act.HTTPRequest(url)
r.SetBasicAuth(a.credentials.username, a.credentials.password)
return r
}
type redirectFollowingHTTPClient struct {
client httpClient
checkRedirect CheckRedirectFunc
}
func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
next := act
for i := 0; i < 100; i++ {
if i > 0 {
if err := r.checkRedirect(i); err != nil {
return nil, nil, err
}
}
resp, body, err := r.client.Do(ctx, next)
if err != nil {
return nil, nil, err
}
if resp.StatusCode/100 == 3 {
hdr := resp.Header.Get("Location")
if hdr == "" {
return nil, nil, fmt.Errorf("Location header not set")
}
loc, err := url.Parse(hdr)
if err != nil {
return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
}
next = &redirectedHTTPAction{
action: act,
location: *loc,
}
continue
}
return resp, body, nil
}
return nil, nil, errTooManyRedirectChecks
}
type redirectedHTTPAction struct {
action httpAction
location url.URL
}
func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
orig := r.action.HTTPRequest(ep)
orig.URL = &r.location
return orig
}
func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
// copied from Go 1.9<= rand.Rand.Perm
n := len(eps)
p := make([]int, n)
for i := 0; i < n; i++ {
j := r.Intn(i + 1)
p[i] = p[j]
p[j] = i
}
neps := make([]url.URL, n)
for i, k := range p {
neps[i] = eps[k]
}
return neps
}
func endpointsEqual(left, right []url.URL) bool {
if len(left) != len(right) {
return false
}
sLeft := make([]string, len(left))
sRight := make([]string, len(right))
for i, l := range left {
sLeft[i] = l.String()
}
for i, r := range right {
sRight[i] = r.String()
}
sort.Strings(sLeft)
sort.Strings(sRight)
for i := range sLeft {
if sLeft[i] != sRight[i] {
return false
}
}
return true
}
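// Illustrative usage sketch, not part of the original vendored file: building a
// Client that prioritizes the leader and keeps its endpoint list synchronized.
// The endpoint URL and intervals are placeholder values.
func exampleNewClient(ctx context.Context) (Client, error) {
	cfg := Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		Transport:               DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
		SelectionMode:           EndpointSelectionPrioritizeLeader,
	}
	c, err := New(cfg)
	if err != nil {
		return nil, err
	}
	// PrioritizeLeader relies on periodic syncs to learn the current leader.
	go c.AutoSync(ctx, 10*time.Second)
	return c, nil
}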

View file

@ -1,37 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import "fmt"
type ClusterError struct {
Errors []error
}
func (ce *ClusterError) Error() string {
s := ErrClusterUnavailable.Error()
for i, e := range ce.Errors {
s += fmt.Sprintf("; error #%d: %s\n", i, e)
}
return s
}
func (ce *ClusterError) Detail() string {
s := ""
for i, e := range ce.Errors {
s += fmt.Sprintf("error #%d: %s\n", i, e)
}
return s
}

View file

@ -1,70 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
)
var (
cURLDebug = false
)
func EnablecURLDebug() {
cURLDebug = true
}
func DisablecURLDebug() {
cURLDebug = false
}
// printcURL prints the cURL equivalent request to stderr.
// It returns an error if the body of the request cannot
// be read.
// The caller MUST cancel the request if there is an error.
func printcURL(req *http.Request) error {
if !cURLDebug {
return nil
}
var (
command string
b []byte
err error
)
if req.URL != nil {
command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
}
if req.Body != nil {
b, err = ioutil.ReadAll(req.Body)
if err != nil {
return err
}
command += fmt.Sprintf(" -d %q", string(b))
}
fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
// reset body
body := bytes.NewBuffer(b)
req.Body = ioutil.NopCloser(body)
return nil
}
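// Illustrative usage sketch, not part of the original vendored file: wrapping a
// block of client calls so each request is echoed to stderr as an equivalent
// curl command while the callback runs.
func exampleWithcURLDebug(fn func()) {
	EnablecURLDebug()
	defer DisablecURLDebug()
	fn()
}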

View file

@ -1,40 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"github.com/coreos/etcd/pkg/srv"
)
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
func (d *srvDiscover) Discover(domain string) ([]string, error) {
srvs, err := srv.GetClient("etcd-client", domain)
if err != nil {
return nil, err
}
return srvs.Endpoints, nil
}
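// Illustrative usage sketch, not part of the original vendored file: resolving
// client endpoints from DNS SRV records before constructing a Client. The
// domain is a placeholder; the lookup targets _etcd-client-ssl._tcp and
// _etcd-client._tcp under that domain.
func exampleDiscoverEndpoints() ([]string, error) {
	d := NewSRVDiscover()
	return d.Discover("example.com")
}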

View file

@ -1,73 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package client provides bindings for the etcd APIs.
Create a Config and exchange it for a Client:
import (
"net/http"
"context"
"github.com/coreos/etcd/client"
)
cfg := client.Config{
Endpoints: []string{"http://127.0.0.1:2379"},
Transport: DefaultTransport,
}
c, err := client.New(cfg)
if err != nil {
// handle error
}
Clients are safe for concurrent use by multiple goroutines.
Create a KeysAPI using the Client, then use it to interact with etcd:
kAPI := client.NewKeysAPI(c)
// create a new key /foo with the value "bar"
_, err = kAPI.Create(context.Background(), "/foo", "bar")
if err != nil {
// handle error
}
// delete the newly created key only if the value is still "bar"
_, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
if err != nil {
// handle error
}
Use a custom context to set timeouts on your operations:
import "time"
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// set a new key, ignoring its previous state
_, err := kAPI.Set(ctx, "/ping", "pong", nil)
if err != nil {
if err == context.DeadlineExceeded {
// request took longer than 5s
} else {
// handle error
}
}
*/
package client

File diff suppressed because it is too large

View file

@ -1,681 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/coreos/etcd/pkg/pathutil"
"github.com/ugorji/go/codec"
)
const (
ErrorCodeKeyNotFound = 100
ErrorCodeTestFailed = 101
ErrorCodeNotFile = 102
ErrorCodeNotDir = 104
ErrorCodeNodeExist = 105
ErrorCodeRootROnly = 107
ErrorCodeDirNotEmpty = 108
ErrorCodeUnauthorized = 110
ErrorCodePrevValueRequired = 201
ErrorCodeTTLNaN = 202
ErrorCodeIndexNaN = 203
ErrorCodeInvalidField = 209
ErrorCodeInvalidForm = 210
ErrorCodeRaftInternal = 300
ErrorCodeLeaderElect = 301
ErrorCodeWatcherCleared = 400
ErrorCodeEventIndexCleared = 401
)
type Error struct {
Code int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause"`
Index uint64 `json:"index"`
}
func (e Error) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
}
var (
ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
ErrEmptyBody = errors.New("client: response body is empty")
)
// PrevExistType is used to define an existence condition when setting
// or deleting Nodes.
type PrevExistType string
const (
PrevIgnore = PrevExistType("")
PrevExist = PrevExistType("true")
PrevNoExist = PrevExistType("false")
)
var (
defaultV2KeysPrefix = "/v2/keys"
)
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
// API over HTTP.
func NewKeysAPI(c Client) KeysAPI {
return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
}
// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
// to provide a custom base URL path. This should only be used in
// very rare cases.
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
return &httpKeysAPI{
client: c,
prefix: p,
}
}
type KeysAPI interface {
// Get retrieves a set of Nodes from etcd
Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
// Set assigns a new value to a Node identified by a given key. The caller
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
// then value is ignored.
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
// Delete removes a Node identified by the given key, optionally destroying
// all of its children as well. The caller may define a set of required
// conditions in an DeleteOptions object.
Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
// Create is an alias for Set w/ PrevExist=false
Create(ctx context.Context, key, value string) (*Response, error)
// CreateInOrder is used to atomically create in-order keys within the given directory.
CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
// Update is an alias for Set w/ PrevExist=true
Update(ctx context.Context, key, value string) (*Response, error)
// Watcher builds a new Watcher targeted at a specific Node identified
// by the given key. The Watcher may be configured at creation time
// through a WatcherOptions object. The returned Watcher is designed
// to emit events that happen to a Node, and optionally to its children.
Watcher(key string, opts *WatcherOptions) Watcher
}
type WatcherOptions struct {
// AfterIndex defines the index after-which the Watcher should
// start emitting events. For example, if a value of 5 is
// provided, the first event will have an index >= 6.
//
// Setting AfterIndex to 0 (default) means that the Watcher
// should start watching for events starting at the current
// index, whatever that may be.
AfterIndex uint64
// Recursive specifies whether or not the Watcher should emit
// events that occur in children of the given keyspace. If set
// to false (default), events will be limited to those that
// occur for the exact key.
Recursive bool
}
type CreateInOrderOptions struct {
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
}
type SetOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Set operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
//
// PrevValue is ignored if Dir=true
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Set operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// PrevExist specifies whether the Node must currently exist
// (PrevExist) or not (PrevNoExist). If the caller does not
// care about existence, set PrevExist to PrevIgnore, or simply
// leave it unset.
PrevExist PrevExistType
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
// Refresh set to true means a TTL value can be updated
// without firing a watch or changing the node value. A
// value must not be provided when refreshing a key.
Refresh bool
// Dir specifies whether or not this Node should be created as a directory.
Dir bool
// NoValueOnSuccess specifies whether the response contains the current value of the Node.
// If set, the response will only contain the current value when the request fails.
NoValueOnSuccess bool
}
type GetOptions struct {
// Recursive defines whether or not all children of the Node
// should be returned.
Recursive bool
// Sort instructs the server whether or not to sort the Nodes.
// If true, the Nodes are sorted alphabetically by key in
// ascending order (A to z). If false (default), the Nodes will
// not be sorted and the ordering used should not be considered
// predictable.
Sort bool
// Quorum specifies whether it gets the latest committed value that
// has been applied in quorum of members, which ensures external
// consistency (or linearizability).
Quorum bool
}
type DeleteOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Delete operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Delete operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// Recursive defines whether or not all children of the Node
// should be deleted. If set to true, all children of the Node
// identified by the given key will be deleted. If left unset
// or explicitly set to false, only a single Node will be
// deleted.
Recursive bool
// Dir specifies whether or not this Node should be removed as a directory.
Dir bool
}
type Watcher interface {
// Next blocks until an etcd event occurs, then returns a Response
// representing that event. The behavior of Next depends on the
// WatcherOptions used to construct the Watcher. Next is designed to
// be called repeatedly, each time blocking until a subsequent event
// is available.
//
// If the provided context is cancelled, Next will return a non-nil
// error. Any other failures encountered while waiting for the next
// event (connection issues, deserialization failures, etc) will
// also result in a non-nil error.
Next(context.Context) (*Response, error)
}
type Response struct {
// Action is the name of the operation that occurred. Possible values
// include get, set, delete, update, create, compareAndSwap,
// compareAndDelete and expire.
Action string `json:"action"`
// Node represents the state of the relevant etcd Node.
Node *Node `json:"node"`
// PrevNode represents the previous state of the Node. PrevNode is non-nil
// only if the Node existed before the action occurred and the action
// caused a change to the Node.
PrevNode *Node `json:"prevNode"`
// Index holds the cluster-level index at the time the Response was generated.
// This index is not tied to the Node(s) contained in this Response.
Index uint64 `json:"-"`
// ClusterID holds the cluster-level ID reported by the server. This
// should be different for different etcd clusters.
ClusterID string `json:"-"`
}
type Node struct {
// Key represents the unique location of this Node (e.g. "/foo/bar").
Key string `json:"key"`
// Dir reports whether node describes a directory.
Dir bool `json:"dir,omitempty"`
// Value is the current data stored on this Node. If this Node
// is a directory, Value will be empty.
Value string `json:"value"`
// Nodes holds the children of this Node, only if this Node is a directory.
// This slice will be arbitrarily deep (children, grandchildren, great-
// grandchildren, etc.) if a recursive Get or Watch request were made.
Nodes Nodes `json:"nodes"`
// CreatedIndex is the etcd index at-which this Node was created.
CreatedIndex uint64 `json:"createdIndex"`
// ModifiedIndex is the etcd index at-which this Node was last modified.
ModifiedIndex uint64 `json:"modifiedIndex"`
// Expiration is the server side expiration time of the key.
Expiration *time.Time `json:"expiration,omitempty"`
// TTL is the time to live of the key in second.
TTL int64 `json:"ttl,omitempty"`
}
func (n *Node) String() string {
return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
}
// TTLDuration returns the Node's TTL as a time.Duration object
func (n *Node) TTLDuration() time.Duration {
return time.Duration(n.TTL) * time.Second
}
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int { return len(ns) }
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
type httpKeysAPI struct {
client httpClient
prefix string
}
func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
act := &setAction{
Prefix: k.prefix,
Key: key,
Value: val,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.PrevExist = opts.PrevExist
act.TTL = opts.TTL
act.Refresh = opts.Refresh
act.Dir = opts.Dir
act.NoValueOnSuccess = opts.NoValueOnSuccess
}
doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
}
func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
act := &createInOrderAction{
Prefix: k.prefix,
Dir: dir,
Value: val,
}
if opts != nil {
act.TTL = opts.TTL
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
}
func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
act := &deleteAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.Dir = opts.Dir
act.Recursive = opts.Recursive
}
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
act := &getAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
act.Sorted = opts.Sort
act.Quorum = opts.Quorum
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
act := waitAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
if opts.AfterIndex > 0 {
act.WaitIndex = opts.AfterIndex + 1
}
}
return &httpWatcher{
client: k.client,
nextWait: act,
}
}
type httpWatcher struct {
client httpClient
nextWait waitAction
}
func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
for {
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
if err != nil {
return nil, err
}
resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
if err != nil {
if err == ErrEmptyBody {
continue
}
return nil, err
}
hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
return resp, nil
}
}
// v2KeysURL forms a URL representing the location of a key.
// The endpoint argument represents the base URL of an etcd
// server. The prefix is the path needed to route from the
// provided endpoint's path to the root of the keys API
// (typically "/v2/keys").
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
// We concatenate all parts together manually. We cannot use
// path.Join because it does not preserve the trailing slash.
// We call CanonicalURLPath to further cleanup the path.
if prefix != "" && prefix[0] != '/' {
prefix = "/" + prefix
}
if key != "" && key[0] != '/' {
key = "/" + key
}
ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
return &ep
}
type getAction struct {
Prefix string
Key string
Recursive bool
Sorted bool
Quorum bool
}
func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, g.Prefix, g.Key)
params := u.Query()
params.Set("recursive", strconv.FormatBool(g.Recursive))
params.Set("sorted", strconv.FormatBool(g.Sorted))
params.Set("quorum", strconv.FormatBool(g.Quorum))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type waitAction struct {
Prefix string
Key string
WaitIndex uint64
Recursive bool
}
func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, w.Prefix, w.Key)
params := u.Query()
params.Set("wait", "true")
params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
params.Set("recursive", strconv.FormatBool(w.Recursive))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type setAction struct {
Prefix string
Key string
Value string
PrevValue string
PrevIndex uint64
PrevExist PrevExistType
TTL time.Duration
Refresh bool
Dir bool
NoValueOnSuccess bool
}
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
form := url.Values{}
// we're either creating a directory or setting a key
if a.Dir {
params.Set("dir", strconv.FormatBool(a.Dir))
} else {
// These options are only valid for setting a key
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
form.Add("value", a.Value)
}
// Options which apply to both setting a key and creating a dir
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.PrevExist != PrevIgnore {
params.Set("prevExist", string(a.PrevExist))
}
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
if a.Refresh {
form.Add("refresh", "true")
}
if a.NoValueOnSuccess {
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
}
u.RawQuery = params.Encode()
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("PUT", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type deleteAction struct {
Prefix string
Key string
PrevValue string
PrevIndex uint64
Dir bool
Recursive bool
}
func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.Dir {
params.Set("dir", "true")
}
if a.Recursive {
params.Set("recursive", "true")
}
u.RawQuery = params.Encode()
req, _ := http.NewRequest("DELETE", u.String(), nil)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type createInOrderAction struct {
Prefix string
Dir string
Value string
TTL time.Duration
}
func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Dir)
form := url.Values{}
form.Add("value", a.Value)
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("POST", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
switch code {
case http.StatusOK, http.StatusCreated:
if len(body) == 0 {
return nil, ErrEmptyBody
}
res, err = unmarshalSuccessfulKeysResponse(header, body)
default:
err = unmarshalFailedKeysResponse(body)
}
return res, err
}
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
var res Response
err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
if err != nil {
return nil, ErrInvalidJSON
}
if header.Get("X-Etcd-Index") != "" {
res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
if err != nil {
return nil, err
}
}
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
return &res, nil
}
func unmarshalFailedKeysResponse(body []byte) error {
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return ErrInvalidJSON
}
return etcdErr
}
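// Illustrative usage sketch, not part of the original vendored file: a
// conditional write, a quorum read, and a recursive watch against the keys API.
// The key and value are placeholders.
func exampleKeysAPI(ctx context.Context, c Client) error {
	kAPI := NewKeysAPI(c)
	// Create /foo only if it does not exist yet, with a 60-second TTL.
	if _, err := kAPI.Set(ctx, "/foo", "bar",
		&SetOptions{PrevExist: PrevNoExist, TTL: 60 * time.Second}); err != nil {
		return err
	}
	// Linearizable read of the whole subtree.
	resp, err := kAPI.Get(ctx, "/foo", &GetOptions{Recursive: true, Quorum: true})
	if err != nil {
		return err
	}
	// Block until the next change after the index we just observed.
	w := kAPI.Watcher("/foo", &WatcherOptions{AfterIndex: resp.Index, Recursive: true})
	_, err = w.Next(ctx)
	return err
}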

View file

@ -1,303 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"github.com/coreos/etcd/pkg/types"
)
var (
defaultV2MembersPrefix = "/v2/members"
defaultLeaderSuffix = "/leader"
)
type Member struct {
// ID is the unique identifier of this Member.
ID string `json:"id"`
// Name is a human-readable, non-unique identifier of this Member.
Name string `json:"name"`
// PeerURLs represents the HTTP(S) endpoints this Member uses to
// participate in etcd's consensus protocol.
PeerURLs []string `json:"peerURLs"`
// ClientURLs represents the HTTP(S) endpoints on which this Member
// serves its client-facing APIs.
ClientURLs []string `json:"clientURLs"`
}
type memberCollection []Member
func (c *memberCollection) UnmarshalJSON(data []byte) error {
d := struct {
Members []Member
}{}
if err := json.Unmarshal(data, &d); err != nil {
return err
}
if d.Members == nil {
*c = make([]Member, 0)
return nil
}
*c = d.Members
return nil
}
type memberCreateOrUpdateRequest struct {
PeerURLs types.URLs
}
func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
s := struct {
PeerURLs []string `json:"peerURLs"`
}{
PeerURLs: make([]string, len(m.PeerURLs)),
}
for i, u := range m.PeerURLs {
s.PeerURLs[i] = u.String()
}
return json.Marshal(&s)
}
// NewMembersAPI constructs a new MembersAPI that uses HTTP to
// interact with etcd's membership API.
func NewMembersAPI(c Client) MembersAPI {
return &httpMembersAPI{
client: c,
}
}
type MembersAPI interface {
// List enumerates the current cluster membership.
List(ctx context.Context) ([]Member, error)
// Add instructs etcd to accept a new Member into the cluster.
Add(ctx context.Context, peerURL string) (*Member, error)
// Remove demotes an existing Member out of the cluster.
Remove(ctx context.Context, mID string) error
// Update instructs etcd to update an existing Member in the cluster.
Update(ctx context.Context, mID string, peerURLs []string) error
// Leader gets current leader of the cluster
Leader(ctx context.Context) (*Member, error)
}
type httpMembersAPI struct {
client httpClient
}
func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
req := &membersAPIActionList{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var mCollection memberCollection
if err := json.Unmarshal(body, &mCollection); err != nil {
return nil, err
}
return []Member(mCollection), nil
}
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
urls, err := types.NewURLs([]string{peerURL})
if err != nil {
return nil, err
}
req := &membersAPIActionAdd{peerURLs: urls}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
return nil, err
}
if resp.StatusCode != http.StatusCreated {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return nil, err
}
return nil, merr
}
var memb Member
if err := json.Unmarshal(body, &memb); err != nil {
return nil, err
}
return &memb, nil
}
func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
urls, err := types.NewURLs(peerURLs)
if err != nil {
return err
}
req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return err
}
return merr
}
return nil
}
func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
req := &membersAPIActionRemove{memberID: memberID}
resp, _, err := m.client.Do(ctx, req)
if err != nil {
return err
}
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
}
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
req := &membersAPIActionLeader{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var leader Member
if err := json.Unmarshal(body, &leader); err != nil {
return nil, err
}
return &leader, nil
}
type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type membersAPIActionRemove struct {
memberID string
}
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, d.memberID)
req, _ := http.NewRequest("DELETE", u.String(), nil)
return req
}
type membersAPIActionAdd struct {
peerURLs types.URLs
}
func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
type membersAPIActionUpdate struct {
memberID string
peerURLs types.URLs
}
func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
u.Path = path.Join(u.Path, a.memberID)
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
func assertStatusCode(got int, want ...int) (err error) {
for _, w := range want {
if w == got {
return nil
}
}
return fmt.Errorf("unexpected status code %d", got)
}
type membersAPIActionLeader struct{}
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, defaultLeaderSuffix)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
// v2MembersURL adds the necessary path to the provided endpoint
// to route requests to the default v2 members API.
func v2MembersURL(ep url.URL) *url.URL {
ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
return &ep
}
type membersError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e membersError) Error() string {
return e.Message
}
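// Illustrative usage sketch, not part of the original vendored file: listing the
// cluster members and reporting which one is currently the leader.
func exampleClusterLeader(ctx context.Context, c Client) (string, error) {
	mAPI := NewMembersAPI(c)
	leader, err := mAPI.Leader(ctx)
	if err != nil {
		return "", err
	}
	members, err := mAPI.List(ctx)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("leader %s among %d members", leader.Name, len(members)), nil
}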

View file

@ -1,53 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"regexp"
)
var (
roleNotFoundRegExp *regexp.Regexp
userNotFoundRegExp *regexp.Regexp
)
func init() {
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
}
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
func IsKeyNotFound(err error) bool {
if cErr, ok := err.(Error); ok {
return cErr.Code == ErrorCodeKeyNotFound
}
return false
}
// IsRoleNotFound returns true if the error means role not found of v2 API.
func IsRoleNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return roleNotFoundRegExp.MatchString(ae.Message)
}
return false
}
// IsUserNotFound returns true if the error means user not found of v2 API.
func IsUserNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return userNotFoundRegExp.MatchString(ae.Message)
}
return false
}
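// Illustrative usage sketch, not part of the original vendored file (and assuming
// the "context" import): treating a missing key as a default value instead of an
// error when reading through KeysAPI.
func exampleGetOrDefault(ctx context.Context, kAPI KeysAPI, key, def string) (string, error) {
	resp, err := kAPI.Get(ctx, key, nil)
	if IsKeyNotFound(err) {
		return def, nil
	}
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}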

View file

@ -1,31 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pathutil implements utility functions for handling slash-separated
// paths.
package pathutil
import "path"
// CanonicalURLPath returns the canonical url path for p, which follows the rules:
// 1. the path always starts with "/"
// 2. replace multiple slashes with a single slash
// 3. replace each '.' '..' path name element with equivalent one
// 4. keep the trailing slash
// The function is borrowed from stdlib http.cleanPath in server.go.
func CanonicalURLPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root,
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
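// Illustrative sketch, not part of the original vendored file: exercising the
// rules above; the expected results follow from the description and path.Clean.
func exampleCanonicalURLPath() []string {
	return []string{
		CanonicalURLPath(""),           // "/"
		CanonicalURLPath("a//b/../c/"), // "/a/c/"
		CanonicalURLPath("/a//b"),      // "/a/b"
	}
}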

View file

@ -1,141 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package srv looks up DNS SRV records.
package srv
import (
"fmt"
"net"
"net/url"
"strings"
"github.com/coreos/etcd/pkg/types"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
resolveTCPAddr = net.ResolveTCPAddr
)
// GetCluster gets the cluster information via DNS discovery.
// Also sees each entry as a separate instance.
func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
tempName := int(0)
tcp2ap := make(map[string]url.URL)
// First, resolve the apurls
for _, url := range apurls {
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
if err != nil {
return nil, err
}
tcp2ap[tcpAddr.String()] = url
}
stringParts := []string{}
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
return err
}
for _, srv := range addrs {
port := fmt.Sprintf("%d", srv.Port)
host := net.JoinHostPort(srv.Target, port)
tcpAddr, terr := resolveTCPAddr("tcp", host)
if terr != nil {
err = terr
continue
}
n := ""
url, ok := tcp2ap[tcpAddr.String()]
if ok {
n = name
}
if n == "" {
n = fmt.Sprintf("%d", tempName)
tempName++
}
// SRV records have a trailing dot but URL shouldn't.
shortHost := strings.TrimSuffix(srv.Target, ".")
urlHost := net.JoinHostPort(shortHost, port)
if ok && url.Scheme != scheme {
err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
} else {
stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
}
}
if len(stringParts) == 0 {
return err
}
return nil
}
failCount := 0
err := updateNodeMap(service+"-ssl", "https")
srvErr := make([]string, 2)
if err != nil {
srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
failCount++
}
err = updateNodeMap(service, "http")
if err != nil {
srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
failCount++
}
if failCount == 2 {
return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
}
return stringParts, nil
}
type SRVClients struct {
Endpoints []string
SRVs []*net.SRV
}
// GetClient looks up the client endpoints for a service and domain.
func GetClient(service, domain string) (*SRVClients, error) {
var urls []*url.URL
var srvs []*net.SRV
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
srvs = append(srvs, addrs...)
return nil
}
errHTTPS := updateURLs(service+"-ssl", "https")
errHTTP := updateURLs(service, "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
}
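A minimal sketch of how GetClient was typically called. The service name and domain are placeholders, and the corresponding DNS SRV records (_etcd-client-ssl._tcp and _etcd-client._tcp) must exist for the lookup to return endpoints:
package main

import (
    "fmt"
    "log"

    "github.com/coreos/etcd/pkg/srv"
)

func main() {
    // Resolves _etcd-client-ssl._tcp.example.com and _etcd-client._tcp.example.com
    // and returns the discovered client URLs.
    clients, err := srv.GetClient("etcd-client", "example.com")
    if err != nil {
        log.Fatal(err)
    }
    for _, ep := range clients.Endpoints {
        fmt.Println(ep)
    }
}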

View file

@@ -1,56 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package version implements etcd version parsing and contains latest version
// information.
package version
import (
"fmt"
"strings"
"github.com/coreos/go-semver/semver"
)
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
Version = "3.3.5"
APIVersion = "unknown"
// Git SHA Value will be set during build
GitSHA = "Not provided (use ./build instead of go build)"
)
func init() {
ver, err := semver.NewVersion(Version)
if err == nil {
APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
}
}
type Versions struct {
Server string `json:"etcdserver"`
Cluster string `json:"etcdcluster"`
// TODO: raft state machine version
}
// Cluster only keeps the major.minor.
func Cluster(v string) string {
vs := strings.Split(v, ".")
if len(vs) <= 2 {
return v
}
return fmt.Sprintf("%s.%s", vs[0], vs[1])
}
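Cluster simply truncates a full version string to major.minor; a short sketch, with illustrative version numbers:
package main

import (
    "fmt"

    "github.com/coreos/etcd/version"
)

func main() {
    fmt.Println(version.Cluster("3.3.5")) // "3.3"
    fmt.Println(version.Cluster("3.3"))   // "3.3" (already major.minor, returned unchanged)
}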

View file

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,268 +0,0 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Semantic Versions http://semver.org
package semver
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
)
type Version struct {
Major int64
Minor int64
Patch int64
PreRelease PreRelease
Metadata string
}
type PreRelease string
func splitOff(input *string, delim string) (val string) {
parts := strings.SplitN(*input, delim, 2)
if len(parts) == 2 {
*input = parts[0]
val = parts[1]
}
return val
}
func New(version string) *Version {
return Must(NewVersion(version))
}
func NewVersion(version string) (*Version, error) {
v := Version{}
if err := v.Set(version); err != nil {
return nil, err
}
return &v, nil
}
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
if err != nil {
panic(err)
}
return v
}
// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
metadata := splitOff(&version, "+")
preRelease := PreRelease(splitOff(&version, "-"))
dotParts := strings.SplitN(version, ".", 3)
if len(dotParts) != 3 {
return fmt.Errorf("%s is not in dotted-tri format", version)
}
parsed := make([]int64, 3, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
parsed[i] = val
if err != nil {
return err
}
}
v.Metadata = metadata
v.PreRelease = preRelease
v.Major = parsed[0]
v.Minor = parsed[1]
v.Patch = parsed[2]
return nil
}
func (v Version) String() string {
var buffer bytes.Buffer
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
if v.PreRelease != "" {
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
}
if v.Metadata != "" {
fmt.Fprintf(&buffer, "+%s", v.Metadata)
}
return buffer.String()
}
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
var data string
if err := unmarshal(&data); err != nil {
return err
}
return v.Set(data)
}
func (v Version) MarshalJSON() ([]byte, error) {
return []byte(`"` + v.String() + `"`), nil
}
func (v *Version) UnmarshalJSON(data []byte) error {
l := len(data)
if l == 0 || string(data) == `""` {
return nil
}
if l < 2 || data[0] != '"' || data[l-1] != '"' {
return errors.New("invalid semver string")
}
return v.Set(string(data[1 : l-1]))
}
// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
return cmp
}
return preReleaseCompare(v, versionB)
}
// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
return v.Compare(versionB) == 0
}
// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
return v.Compare(versionB) < 0
}
// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
return []int64{v.Major, v.Minor, v.Patch}
}
func (p PreRelease) Slice() []string {
preRelease := string(p)
return strings.Split(preRelease, ".")
}
func preReleaseCompare(versionA Version, versionB Version) int {
a := versionA.PreRelease
b := versionB.PreRelease
/* Handle the case where if two versions are otherwise equal it is the
* one without a PreRelease that is greater */
if len(a) == 0 && (len(b) > 0) {
return 1
} else if len(b) == 0 && (len(a) > 0) {
return -1
}
// If there is a prerelease, check and compare each part.
return recursivePreReleaseCompare(a.Slice(), b.Slice())
}
func recursiveCompare(versionA []int64, versionB []int64) int {
if len(versionA) == 0 {
return 0
}
a := versionA[0]
b := versionB[0]
if a > b {
return 1
} else if a < b {
return -1
}
return recursiveCompare(versionA[1:], versionB[1:])
}
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
// A larger set of pre-release fields has a higher precedence than a smaller set,
// if all of the preceding identifiers are equal.
if len(versionA) == 0 {
if len(versionB) > 0 {
return -1
}
return 0
} else if len(versionB) == 0 {
// We're longer than versionB so return 1.
return 1
}
a := versionA[0]
b := versionB[0]
aInt := false
bInt := false
aI, err := strconv.Atoi(versionA[0])
if err == nil {
aInt = true
}
bI, err := strconv.Atoi(versionB[0])
if err == nil {
bInt = true
}
// Handle Integer Comparison
if aInt && bInt {
if aI > bI {
return 1
} else if aI < bI {
return -1
}
}
// Handle String Comparison
if a > b {
return 1
} else if a < b {
return -1
}
return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
v.Major += 1
v.Minor = 0
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
v.Minor += 1
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
v.Patch += 1
v.PreRelease = PreRelease("")
v.Metadata = ""
}
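A short sketch of the parse-and-compare flow implemented above; the version strings are illustrative:
package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-semver/semver"
)

func main() {
    a, err := semver.NewVersion("3.3.5")
    if err != nil {
        log.Fatal(err)
    }
    b := semver.New("3.4.0-rc.1") // New/Must panic on invalid input

    fmt.Println(a.LessThan(*b)) // true: 3.3.5 < 3.4.0-rc.1
    fmt.Println(a.Compare(*b))  // -1
    fmt.Println(b.PreRelease)   // "rc.1"
}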

View file

@@ -1,38 +0,0 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semver
import (
"sort"
)
type Versions []*Version
func (s Versions) Len() int {
return len(s)
}
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s Versions) Less(i, j int) bool {
return s[i].LessThan(*s[j])
}
// Sort sorts the given slice of Version
func Sort(versions []*Version) {
sort.Sort(Versions(versions))
}
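Sort orders a slice in place using the same comparison; for example, a pre-release sorts before its corresponding release:
package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

func main() {
    versions := []*semver.Version{
        semver.New("3.3.5"),
        semver.New("3.0.0"),
        semver.New("3.3.5-rc.1"),
    }
    semver.Sort(versions)
    for _, v := range versions {
        fmt.Println(v) // 3.0.0, then 3.3.5-rc.1, then 3.3.5
    }
}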

22
vendor/github.com/ugorji/go/LICENSE generated vendored
View file

@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2012-2015 Ugorji Nwoke.
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,264 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
/*
Package codec provides a
High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library
for binc, msgpack, cbor, json.
Supported Serialization formats are:
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
To install:
go get github.com/ugorji/go/codec
This package will carefully use 'unsafe' for performance reasons in specific places.
You can build without unsafe use by passing the safe or appengine tag
i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from
go 1.7+ . This is because supporting unsafe requires knowledge of implementation details.
For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Support for go1.4 and above, while selectively using newer APIs for later releases
- Excellent code coverage ( > 90% )
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Careful selected use of 'unsafe' for targeted performance gains.
100% mode exists where 'unsafe' is not used at all.
- Lock-free (sans mutex) concurrency for scaling to 100's of cores
- Coerce types where appropriate
e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags
- Support for omitting empty fields during an encoding
- Encoding from any value and decoding into pointer to any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Support IsZero() bool to determine if a value is a zero value.
Analogous to time.Time.IsZero() bool.
- Decoding without a schema (into a interface{}).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Mapping a non-interface type to an interface, so we can decode appropriately
into any interface type with a correctly configured non-interface value.
- Encode a struct as an array, and decode struct from an array in the data stream
- Option to encode struct keys as numbers (instead of strings)
(to support structured streams with fields encoded as numeric codes)
- Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance.
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic.
- NIL in data stream decoded as zero value
- Never silently skip data when decoding.
User decides whether to return an error or silently skip data when keys or indexes
in the data stream do not map to fields in the struct.
- Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
Extension Support
Users can register a function to handle the encoding or decoding of
their custom types.
There are no restrictions on what the custom type can be. Some examples:
type BisSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
Custom Encoding and Decoding
This package maintains symmetry in the encoding and decoding halves.
We determine how to encode or decode by walking this decision tree
- is type a codec.Selfer?
- is there an extension registered for the type?
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- is format text-based, and type an encoding.TextMarshaler?
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
This symmetry is important to reduce chances of issues happening because the
encoding and decoding sides are out of sync e.g. decoded via very specific
encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
Consequently, if a type only defines one-half of the symmetry
(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
then that type doesn't satisfy the check and we will continue walking down the
decision tree.
RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.
Usage
The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
The Encoder and Decoder are NOT safe for concurrent use.
Consequently, the usage model is basically:
- Create and initialize the Handle before any use.
Once created, DO NOT modify it.
- Multiple Encoders or Decoders can now use the Handle concurrently.
They only read information off the Handle (never write).
- However, each Encoder or Decoder MUST not be used concurrently
- To re-use an Encoder/Decoder, call Reset(...) on it first.
This allows you use state maintained on the Encoder/Decoder.
Sample usage model:
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
ch codec.CborHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
Running Tests
To run tests, use the following:
go test
To run the full suite of tests, use the following:
go test -tags alltests -run Suite
You can run the tag 'safe' to run tests or build in safe mode. e.g.
go test -tags safe -run Json
go test -tags "alltests safe" -run Suite
Running Benchmarks
Please see http://github.com/ugorji/go-codec-bench .
Caveats
Struct fields matching the following are ignored during encoding and decoding
- struct tag value set to -
- func, complex numbers, unsafe pointers
- unexported and not embedded
- unexported and embedded and not struct kind
- unexported and embedded pointers (from go1.10)
Every other field in a struct will be encoded/decoded.
Embedded fields are encoded as if they exist in the top-level struct,
with some caveats. See Encode documentation.
*/
package codec
// TODO:
// - For Go 1.11, when mid-stack inlining is enabled,
// we should use committed functions for writeXXX and readXXX calls.
// This involves uncommenting the methods for decReaderSwitch and encWriterSwitch
// and using those (decReaderSwitch and encWriterSwitch) in all handles
// instead of encWriter and decReader.
// The benefit is that, for the (En|De)coder over []byte, the encWriter/decReader
// will be inlined, giving a performance bump for that typical case.
// However, it will only be inlined if mid-stack inlining is enabled,
// as we call panic to raise errors, and panic currently prevents inlining.
//
// PUNTED:
// - To make Handle comparable, make extHandle in BasicHandle a non-embedded pointer,
// and use overlay methods on *BasicHandle to call through to extHandle after initializing
// the "xh *extHandle" to point to a real slice.
//
// BEFORE EACH RELEASE:
// - Look through and fix padding for each type, to eliminate false sharing
// - critical shared objects that are read many times
// TypeInfos
// - pooled objects:
// decNaked, decNakedContainers, codecFner, typeInfoLoadArray,
// - small objects allocated independently, that we read/use much across threads:
// codecFn, typeInfo
// - Objects allocated independently and used a lot
// Decoder, Encoder,
// xxxHandle, xxxEncDriver, xxxDecDriver (xxx = json, msgpack, cbor, binc, simple)
// - In all above, arrange values modified together to be close to each other.
//
// For all of these, either ensure that they occupy full cache lines,
// or ensure that the things just past the cache line boundary are hardly read/written
// e.g. JsonHandle.RawBytesExt - which is copied into json(En|De)cDriver at init
//
// Occupying full cache lines means they occupy 8*N words (where N is an integer).
// Check this out by running: ./run.sh -z
// - look at those tagged ****, meaning they are not occupying full cache lines
// - look at those tagged <<<<, meaning they are larger than 32 words (something to watch)
// - Run "golint -min_confidence 0.81"

File diff suppressed because it is too large

View file

@@ -1,756 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math"
"reflect"
"time"
)
const (
cborMajorUint byte = iota
cborMajorNegInt
cborMajorBytes
cborMajorText
cborMajorArray
cborMajorMap
cborMajorTag
cborMajorOther
)
const (
cborBdFalse byte = 0xf4 + iota
cborBdTrue
cborBdNil
cborBdUndefined
cborBdExt
cborBdFloat16
cborBdFloat32
cborBdFloat64
)
const (
cborBdIndefiniteBytes byte = 0x5f
cborBdIndefiniteString = 0x7f
cborBdIndefiniteArray = 0x9f
cborBdIndefiniteMap = 0xbf
cborBdBreak = 0xff
)
// These define some in-stream descriptors for
// manual encoding e.g. when doing explicit indefinite-length
const (
CborStreamBytes byte = 0x5f
CborStreamString = 0x7f
CborStreamArray = 0x9f
CborStreamMap = 0xbf
CborStreamBreak = 0xff
)
const (
cborBaseUint byte = 0x00
cborBaseNegInt = 0x20
cborBaseBytes = 0x40
cborBaseString = 0x60
cborBaseArray = 0x80
cborBaseMap = 0xa0
cborBaseTag = 0xc0
cborBaseSimple = 0xe0
)
func cbordesc(bd byte) string {
switch bd {
case cborBdNil:
return "nil"
case cborBdFalse:
return "false"
case cborBdTrue:
return "true"
case cborBdFloat16, cborBdFloat32, cborBdFloat64:
return "float"
case cborBdIndefiniteBytes:
return "bytes*"
case cborBdIndefiniteString:
return "string*"
case cborBdIndefiniteArray:
return "array*"
case cborBdIndefiniteMap:
return "map*"
default:
switch {
case bd >= cborBaseUint && bd < cborBaseNegInt:
return "(u)int"
case bd >= cborBaseNegInt && bd < cborBaseBytes:
return "int"
case bd >= cborBaseBytes && bd < cborBaseString:
return "bytes"
case bd >= cborBaseString && bd < cborBaseArray:
return "string"
case bd >= cborBaseArray && bd < cborBaseMap:
return "array"
case bd >= cborBaseMap && bd < cborBaseTag:
return "map"
case bd >= cborBaseTag && bd < cborBaseSimple:
return "ext"
default:
return "unknown"
}
}
}
// -------------------
type cborEncDriver struct {
noBuiltInTypes
encDriverNoopContainerWriter
// encNoSeparator
e *Encoder
w encWriter
h *CborHandle
x [8]byte
_ [3]uint64 // padding
}
func (e *cborEncDriver) EncodeNil() {
e.w.writen1(cborBdNil)
}
func (e *cborEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(cborBdTrue)
} else {
e.w.writen1(cborBdFalse)
}
}
func (e *cborEncDriver) EncodeFloat32(f float32) {
e.w.writen1(cborBdFloat32)
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *cborEncDriver) EncodeFloat64(f float64) {
e.w.writen1(cborBdFloat64)
bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
}
func (e *cborEncDriver) encUint(v uint64, bd byte) {
if v <= 0x17 {
e.w.writen1(byte(v) + bd)
} else if v <= math.MaxUint8 {
e.w.writen2(bd+0x18, uint8(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd + 0x19)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd + 0x1a)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
} else { // if v <= math.MaxUint64 {
e.w.writen1(bd + 0x1b)
bigenHelper{e.x[:8], e.w}.writeUint64(v)
}
}
func (e *cborEncDriver) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-1-v), cborBaseNegInt)
} else {
e.encUint(uint64(v), cborBaseUint)
}
}
func (e *cborEncDriver) EncodeUint(v uint64) {
e.encUint(v, cborBaseUint)
}
func (e *cborEncDriver) encLen(bd byte, length int) {
e.encUint(uint64(length), bd)
}
func (e *cborEncDriver) EncodeTime(t time.Time) {
if t.IsZero() {
e.EncodeNil()
} else if e.h.TimeRFC3339 {
e.encUint(0, cborBaseTag)
e.EncodeString(cUTF8, t.Format(time.RFC3339Nano))
} else {
e.encUint(1, cborBaseTag)
t = t.UTC().Round(time.Microsecond)
sec, nsec := t.Unix(), uint64(t.Nanosecond())
if nsec == 0 {
e.EncodeInt(sec)
} else {
e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
}
}
}
func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
e.encUint(uint64(xtag), cborBaseTag)
if v := ext.ConvertExt(rv); v == nil {
e.EncodeNil()
} else {
en.encode(v)
}
}
func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
e.encUint(uint64(re.Tag), cborBaseTag)
if false && re.Data != nil {
en.encode(re.Data)
} else if re.Value != nil {
en.encode(re.Value)
} else {
e.EncodeNil()
}
}
func (e *cborEncDriver) WriteArrayStart(length int) {
if e.h.IndefiniteLength {
e.w.writen1(cborBdIndefiniteArray)
} else {
e.encLen(cborBaseArray, length)
}
}
func (e *cborEncDriver) WriteMapStart(length int) {
if e.h.IndefiniteLength {
e.w.writen1(cborBdIndefiniteMap)
} else {
e.encLen(cborBaseMap, length)
}
}
func (e *cborEncDriver) WriteMapEnd() {
if e.h.IndefiniteLength {
e.w.writen1(cborBdBreak)
}
}
func (e *cborEncDriver) WriteArrayEnd() {
if e.h.IndefiniteLength {
e.w.writen1(cborBdBreak)
}
}
func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
e.encStringBytesS(cborBaseString, v)
}
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if v == nil {
e.EncodeNil()
} else if c == cRAW {
e.encStringBytesS(cborBaseBytes, stringView(v))
} else {
e.encStringBytesS(cborBaseString, stringView(v))
}
}
func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
if e.h.IndefiniteLength {
if bb == cborBaseBytes {
e.w.writen1(cborBdIndefiniteBytes)
} else {
e.w.writen1(cborBdIndefiniteString)
}
blen := len(v) / 4
if blen == 0 {
blen = 64
} else if blen > 1024 {
blen = 1024
}
for i := 0; i < len(v); {
var v2 string
i2 := i + blen
if i2 < len(v) {
v2 = v[i:i2]
} else {
v2 = v[i:]
}
e.encLen(bb, len(v2))
e.w.writestr(v2)
i = i2
}
e.w.writen1(cborBdBreak)
} else {
e.encLen(bb, len(v))
e.w.writestr(v)
}
}
// ----------------------
type cborDecDriver struct {
d *Decoder
h *CborHandle
r decReader
// b [scratchByteArrayLen]byte
br bool // bytes reader
bdRead bool
bd byte
noBuiltInTypes
// decNoSeparator
decDriverNoopContainerReader
_ [3]uint64 // padding
}
func (d *cborDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
}
func (d *cborDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *cborDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil {
return valueTypeNil
} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
return valueTypeBytes
} else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
return valueTypeString
} else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
return valueTypeArray
} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
return valueTypeMap
}
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset
}
func (d *cborDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
// treat Nil and Undefined as nil values
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return true
}
return false
}
func (d *cborDecDriver) CheckBreak() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdBreak {
d.bdRead = false
return true
}
return false
}
func (d *cborDecDriver) decUint() (ui uint64) {
v := d.bd & 0x1f
if v <= 0x17 {
ui = uint64(v)
} else {
if v == 0x18 {
ui = uint64(d.r.readn1())
} else if v == 0x19 {
ui = uint64(bigen.Uint16(d.r.readx(2)))
} else if v == 0x1a {
ui = uint64(bigen.Uint32(d.r.readx(4)))
} else if v == 0x1b {
ui = uint64(bigen.Uint64(d.r.readx(8)))
} else {
d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
return
}
}
return
}
func (d *cborDecDriver) decCheckInteger() (neg bool) {
if !d.bdRead {
d.readNextBd()
}
major := d.bd >> 5
if major == cborMajorUint {
} else if major == cborMajorNegInt {
neg = true
} else {
d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
return
}
return
}
func (d *cborDecDriver) DecodeInt64() (i int64) {
neg := d.decCheckInteger()
ui := d.decUint()
// check if this number can be converted to an int without overflow
if neg {
i = -(chkOvf.SignedIntV(ui + 1))
} else {
i = chkOvf.SignedIntV(ui)
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeUint64() (ui uint64) {
if d.decCheckInteger() {
d.d.errorf("assigning negative signed value to unsigned type")
return
}
ui = d.decUint()
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeFloat64() (f float64) {
if !d.bdRead {
d.readNextBd()
}
if bd := d.bd; bd == cborBdFloat16 {
f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
} else if bd == cborBdFloat32 {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if bd == cborBdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else if bd >= cborBaseUint && bd < cborBaseBytes {
f = float64(d.DecodeInt64())
} else {
d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd))
return
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *cborDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if bd := d.bd; bd == cborBdTrue {
b = true
} else if bd == cborBdFalse {
} else {
d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
return
}
d.bdRead = false
return
}
func (d *cborDecDriver) ReadMapStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
if d.bd == cborBdIndefiniteMap {
return -1
}
return d.decLen()
}
func (d *cborDecDriver) ReadArrayStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
if d.bd == cborBdIndefiniteArray {
return -1
}
return d.decLen()
}
func (d *cborDecDriver) decLen() int {
return int(d.decUint())
}
func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
d.bdRead = false
for {
if d.CheckBreak() {
break
}
if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+
" got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd))
return nil
}
n := d.decLen()
oldLen := len(bs)
newLen := oldLen + n
if newLen > cap(bs) {
bs2 := make([]byte, newLen, 2*cap(bs)+n)
copy(bs2, bs)
bs = bs2
} else {
bs = bs[:newLen]
}
d.r.readb(bs[oldLen:newLen])
// bs = append(bs, d.r.readn()...)
d.bdRead = false
}
d.bdRead = false
return bs
}
func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return nil
}
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
d.bdRead = false
if bs == nil {
if zerocopy {
return d.decAppendIndefiniteBytes(d.d.b[:0])
}
return d.decAppendIndefiniteBytes(zeroByteSlice)
}
return d.decAppendIndefiniteBytes(bs[:0])
}
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
return
}
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.d.b[:]
}
}
return decByteSlice(d.r, clen, d.h.MaxInitLen, bs)
}
func (d *cborDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.d.b[:], true))
}
func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.d.b[:], true)
}
func (d *cborDecDriver) DecodeTime() (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return
}
xtag := d.decUint()
d.bdRead = false
return d.decodeTime(xtag)
}
func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
switch xtag {
case 0:
var err error
if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil {
d.d.errorv(err)
}
case 1:
// decode an int64 or a float, and infer time.Time from there.
// for floats, round to microseconds, as that is what is guaranteed to fit well.
switch {
case d.bd == cborBdFloat16, d.bd == cborBdFloat32:
f1, f2 := math.Modf(d.DecodeFloat64())
t = time.Unix(int64(f1), int64(f2*1e9))
case d.bd == cborBdFloat64:
f1, f2 := math.Modf(d.DecodeFloat64())
t = time.Unix(int64(f1), int64(f2*1e9))
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt,
d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
t = time.Unix(d.DecodeInt64(), 0)
default:
d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)")
}
default:
d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
}
t = t.UTC().Round(time.Microsecond)
return
}
func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if !d.bdRead {
d.readNextBd()
}
u := d.decUint()
d.bdRead = false
realxtag = u
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
d.d.decode(&re.Value)
} else if xtag != realxtag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
return
} else {
var v interface{}
d.d.decode(&v)
ext.UpdateExt(rv, v)
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
n := d.d.n
var decodeFurther bool
switch d.bd {
case cborBdNil:
n.v = valueTypeNil
case cborBdFalse:
n.v = valueTypeBool
n.b = false
case cborBdTrue:
n.v = valueTypeBool
n.b = true
case cborBdFloat16, cborBdFloat32, cborBdFloat64:
n.v = valueTypeFloat
n.f = d.DecodeFloat64()
case cborBdIndefiniteBytes:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false)
case cborBdIndefiniteString:
n.v = valueTypeString
n.s = d.DecodeString()
case cborBdIndefiniteArray:
n.v = valueTypeArray
decodeFurther = true
case cborBdIndefiniteMap:
n.v = valueTypeMap
decodeFurther = true
default:
switch {
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
if d.h.SignedInteger {
n.v = valueTypeInt
n.i = d.DecodeInt64()
} else {
n.v = valueTypeUint
n.u = d.DecodeUint64()
}
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
n.v = valueTypeInt
n.i = d.DecodeInt64()
case d.bd >= cborBaseBytes && d.bd < cborBaseString:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false)
case d.bd >= cborBaseString && d.bd < cborBaseArray:
n.v = valueTypeString
n.s = d.DecodeString()
case d.bd >= cborBaseArray && d.bd < cborBaseMap:
n.v = valueTypeArray
decodeFurther = true
case d.bd >= cborBaseMap && d.bd < cborBaseTag:
n.v = valueTypeMap
decodeFurther = true
case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
n.v = valueTypeExt
n.u = d.decUint()
n.l = nil
if n.u == 0 || n.u == 1 {
d.bdRead = false
n.v = valueTypeTime
n.t = d.decodeTime(n.u)
}
// d.bdRead = false
// d.d.decode(&re.Value) // handled by decode itself.
// decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
return
}
}
if !decodeFurther {
d.bdRead = false
}
return
}
// -------------------------
// CborHandle is a Handle for the CBOR encoding format,
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
//
// CBOR is comprehensively supported, including support for:
// - indefinite-length arrays/maps/bytes/strings
// - (extension) tags in range 0..0xffff (0 .. 65535)
// - half, single and double-precision floats
// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
// - nil, true, false, ...
// - arrays and maps, bytes and text strings
//
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones:
// - timestamp, BigNum, BigFloat, Decimals,
// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
type CborHandle struct {
binaryEncodingType
noElemSeparators
BasicHandle
// IndefiniteLength=true means that we encode using indefinite length
IndefiniteLength bool
// TimeRFC3339 says to encode time.Time using RFC3339 format.
// If unset, we encode time.Time using seconds past epoch.
TimeRFC3339 bool
// _ [1]uint64 // padding
}
// Name returns the name of the handle: cbor
func (h *CborHandle) Name() string { return "cbor" }
// SetInterfaceExt sets an extension
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
}
func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
return &cborEncDriver{e: e, w: e.w, h: h}
}
func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
func (e *cborEncDriver) reset() {
e.w = e.e.w
}
func (d *cborDecDriver) reset() {
d.r, d.br = d.d.r, d.d.bytes
d.bd, d.bdRead = 0, false
}
var _ decDriver = (*cborDecDriver)(nil)
var _ encDriver = (*cborEncDriver)(nil)
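To make the handle's role concrete, a minimal CBOR round-trip sketch using the public codec API; the struct and values are illustrative only:
package main

import (
    "fmt"
    "log"

    "github.com/ugorji/go/codec"
)

type backend struct {
    URL    string
    Weight int
}

func main() {
    var (
        h   codec.CborHandle // Handle: configure once, then share across encoders/decoders
        buf []byte
    )

    in := backend{URL: "http://127.0.0.1:8080", Weight: 10}
    if err := codec.NewEncoderBytes(&buf, &h).Encode(in); err != nil {
        log.Fatal(err)
    }

    var out backend
    if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", out) // {URL:http://127.0.0.1:8080 Weight:10}
}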

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -1,47 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build notfastpath
package codec
import "reflect"
const fastpathEnabled = false
// The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc
// to take a long time.
//
// To mitigate, we now support the notfastpath tag.
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
type fastpathT struct{}
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(*Encoder, *codecFnInfo, reflect.Value)
decfn func(*Decoder, *codecFnInfo, reflect.Value)
}
type fastpathA [0]fastpathE
func (x fastpathA) index(rtid uintptr) int { return -1 }
func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
fn := d.cfer().get(uint8SliceTyp, true, true)
d.kSlice(&fn.i, reflect.ValueOf(&v).Elem())
return v, true
}
var fastpathAV fastpathA
var fastpathTV fastpathT
// ----
type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode

View file

@@ -1,335 +0,0 @@
/* // +build ignore */
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
package codec
import (
"encoding"
"reflect"
)
// GenVersion is the current version of codecgen.
const GenVersion = 8
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
//
// When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
ge = genHelperEncoder{e: e}
ee = genHelperEncDriver{encDriver: e.e}
return
}
// GenHelperDecoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
gd = genHelperDecoder{d: d}
dd = genHelperDecDriver{decDriver: d.d}
return
}
type genHelperEncDriver struct {
encDriver
}
func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
encStructFieldKey(x.encDriver, keyType, s)
}
func (x genHelperEncDriver) EncodeSymbol(s string) {
x.encDriver.EncodeString(cUTF8, s)
}
type genHelperDecDriver struct {
decDriver
C checkOverflow
}
func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
return decStructFieldKey(x.decDriver, keyType, buf)
}
func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
}
func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
}
func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
f = x.DecodeFloat64()
if chkOverflow32 && chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
}
func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
f = x.DecodeFloat64()
if chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
M must
e *Encoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
C checkOverflow
d *Decoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
// f.e.encodeI(iv, false, false)
f.e.encodeValue(reflect.ValueOf(iv), nil, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText()
f.e.marshal(bs, fnerr, false, cUTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON()
f.e.marshal(bs, fnerr, true, cUTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary()
f.e.marshal(bs, fnerr, false, cRAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: builtin no longer supported - so we make this method a no-op,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
// if _, ok := f.e.hh.(*BincHandle); ok {
// return timeTypId
// }
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
return f.e.h.getExt(rtid)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) HasExtensions() bool {
return len(f.e.h.extHandle) != 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
return true
}
return false
}
// ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:]
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
return &f.d.b
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback")
rv := reflect.ValueOf(iv)
if chkPtr {
rv = f.d.ensureDecodeable(rv)
}
f.d.decodeValue(rv, nil, false)
// f.d.decodeValueFallback(rv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
return f.d.decSliceHelperStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
f.d.structFieldNotFound(index, name)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
f.d.arrayCannotExpand(sliceLen, streamLen)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeStringAsBytes()
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: builtin no longer supported - so we make this method a no-op,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return }
// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
// // Note: builtin is no longer supported - so make this a no-op
// if _, ok := f.d.hh.(*BincHandle); ok {
// return timeTypId
// }
// return 0
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool {
return f.d.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
return f.d.h.getExt(rtid)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) HasExtensions() bool {
return len(f.d.h.extHandle) != 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
return true
}
return false
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
return decInferLen(clen, maxlen, unit)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: no longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }


@@ -1,164 +0,0 @@
// +build codecgen.exec
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
const genDecMapTmpl = `
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := r.ReadMapStart()
{{var "bh"}} := z.DecBasicHandle()
if {{var "v"}} == nil {
{{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
*{{ .Varname }} = {{var "v"}}
}
var {{var "mk"}} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if {{var "bh"}}.MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
{{else if not decElemKindImmutable}}{{var "mg"}} = true
{{end}} }
if {{var "l"}} != 0 {
{{var "hl"}} := {{var "l"}} > 0
for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}{{ end }}{{if decElemKindPtr}}
{{var "ms"}} = true{{end}}
if {{var "mg"}} {
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
if {{var "mok"}} {
{{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
{{var "mdn"}} = false
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
if {{var "mdn"}} {
if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
} else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} // else len==0: TODO: Should we clear map entries?
r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
`
const genDecListTmpl = `
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
var {{var "c"}} bool {{/* // changed */}}
_ = {{var "c"}}{{end}}
if {{var "l"}} == 0 {
{{if isSlice }}if {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
} else if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true
} {{else if isChan }}if {{var "v"}} == nil {
{{var "v"}} = make({{ .CTyp }}, 0)
{{var "c"}} = true
} {{end}}
} else {
{{var "hl"}} := {{var "l"}} > 0
var {{var "rl"}} int
_ = {{var "rl"}}
{{if isSlice }} if {{var "hl"}} {
if {{var "l"}} > cap({{var "v"}}) {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
{{var "c"}} = true
} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
}
} {{end}}
var {{var "j"}} int
// var {{var "dn"}} bool
for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
}
{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
{{var "c"}} = true
}{{end}}
{{var "h"}}.ElemContainerState({{var "j"}})
{{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}}
{{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
{{ decLineVar $x }}
{{var "v"}} <- {{ $x }}
// println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this
{{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}}
var {{var "db"}} bool
if {{var "j"}} >= len({{var "v"}}) {
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
{{var "c"}} = true
{{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{end}}
}
if {{var "db"}} {
z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
{{end}}
}
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
{{var "c"}} = true
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
{{var "v"}} = make([]{{ .Typ }}, 0)
{{var "c"}} = true
} {{end}}
}
{{var "h"}}.End()
{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}{{end}}
`
const genEncChanTmpl = `
{{.Label}}:
switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
case timeout{{.Sfx}} == 0: // only consume available
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
default:
break {{.Label}}
}
}
case timeout{{.Sfx}} > 0: // consume until timeout
tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
case <-tt{{.Sfx}}.C:
// close(tt.C)
break {{.Label}}
}
}
default: // consume until close
for b{{.Sfx}} := range {{.Chan}} {
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
}
}
`

File diff suppressed because it is too large


@@ -1,14 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5
package codec
import "reflect"
const reflectArrayOfSupported = true
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
return reflect.ArrayOf(count, elem)
}


@@ -1,14 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5
package codec
import "reflect"
const reflectArrayOfSupported = false
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
panic("codec: reflect.ArrayOf unsupported in this go version")
}


@@ -1,15 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.9
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
if size < 0 {
return reflect.MakeMapWithSize(t, 4)
}
return reflect.MakeMapWithSize(t, size)
}


@@ -1,12 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.9
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
return reflect.MakeMap(t)
}


@@ -1,8 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.10
package codec
const allowSetUnexportedEmbeddedPtr = false


@@ -1,8 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.10
package codec
const allowSetUnexportedEmbeddedPtr = true


@@ -1,17 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.4
package codec
// This codec package will only work for go1.4 and above.
// This is for the following reasons:
// - go 1.4 was released in 2014
// - go runtime is written fully in go
// - interface only holds pointers
// - reflect.Value is stabilized as 3 words
func init() {
panic("codec: go 1.3 and below are not supported")
}


@@ -1,10 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5,!go1.6
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"


@@ -1,10 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.6,!go1.7
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"


@@ -1,8 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.7
package codec
const genCheckVendor = true


@@ -1,8 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5
package codec
var genCheckVendor = false

File diff suppressed because it is too large


@@ -1,121 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// All non-std package dependencies live in this file,
// so porting to different environment is easy (just update functions).
func pruneSignExt(v []byte, pos bool) (n int) {
if len(v) < 2 {
} else if pos && v[0] == 0 {
for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
}
} else if !pos && v[0] == 0xff {
for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
}
}
return
}
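To make the pruning rule above concrete, here is a small worked illustration (hand-written, not part of the vendored file); the byte sequence is an arbitrary example:

package main

import "fmt"

func main() {
	// Worked trace of pruneSignExt for the positive big-endian integer 0x00 0x00 0x7f:
	// the two leading zero bytes are redundant because the next byte's sign bit is
	// clear, so the loop stops with n == 2 and only v[2:] needs to be kept.
	v := []byte{0x00, 0x00, 0x7f}
	n := 2 // the value pruneSignExt(v, true) would return
	fmt.Printf("% x\n", v[n:]) // prints: 7f
}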
// validate that this function is correct ...
// culled from OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
func halfFloatToFloatBits(yy uint16) (d uint32) {
y := uint32(yy)
s := (y >> 15) & 0x01
e := (y >> 10) & 0x1f
m := y & 0x03ff
if e == 0 {
if m == 0 { // plus or minus 0
return s << 31
}
// Denormalized number -- renormalize it
for (m & 0x00000400) == 0 {
m <<= 1
e -= 1
}
e += 1
const zz uint32 = 0x0400
m &= ^zz
} else if e == 31 {
if m == 0 { // Inf
return (s << 31) | 0x7f800000
}
return (s << 31) | 0x7f800000 | (m << 13) // NaN
}
e = e + (127 - 15)
m = m << 13
return (s << 31) | (e << 23) | m
}
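As a quick sanity check of the half-float expansion above (a hand-worked example, not part of the vendored file), the half-precision bit pattern 0x3C00 represents 1.0:

package main

import (
	"fmt"
	"math"
)

func main() {
	// For input 0x3C00: s = 0, e = 15, m = 0, so the normalized branch applies and
	// the float32 exponent becomes e + (127 - 15) = 127, giving bits 127<<23 = 0x3F800000.
	fmt.Println(math.Float32frombits(0x3F800000)) // prints: 1
}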
// GrowCap will return a new capacity for a slice, given the following:
// - oldCap: current capacity
// - unit: in-memory size of an element
// - num: number of elements to add
func growCap(oldCap, unit, num int) (newCap int) {
// appendslice logic (if cap < 1024, *2, else *1.25):
// leads to many copy calls, especially when copying bytes.
// bytes.Buffer model (2*cap + n): much better for bytes.
// smarter way is to take the byte-size of the appended element(type) into account
// maintain 3 thresholds:
// t1: if cap <= t1, newcap = 2x
// t2: if cap <= t2, newcap = 1.75x
// t3: if cap <= t3, newcap = 1.5x
// else newcap = 1.25x
//
// t1, t2, t3 >= 1024 always.
// i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
//
// With this, appending for bytes increase by:
// 100% up to 4K
// 75% up to 8K
// 50% up to 16K
// 25% beyond that
// unit can be 0 e.g. for struct{}{}; handle that appropriately
var t1, t2, t3 int // thresholds
if unit <= 1 {
t1, t2, t3 = 4*1024, 8*1024, 16*1024
} else if unit < 16 {
t3 = 16 / unit * 1024
t1 = t3 * 1 / 4
t2 = t3 * 2 / 4
} else {
t1, t2, t3 = 1024, 1024, 1024
}
var x int // temporary variable
// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
if oldCap <= t1 { // [0,t1]
x = 8
} else if oldCap > t3 { // (t3,infinity]
x = 5
} else if oldCap <= t2 { // (t1,t2]
x = 7
} else { // (t2,t3]
x = 6
}
newCap = x * oldCap / 4
if num > 0 {
newCap += num
}
// ensure newCap is a multiple of 64 (if it is > 64) or 16.
if newCap > 64 {
if x = newCap % 64; x != 0 {
x = newCap / 64
newCap = 64 * (x + 1)
}
} else {
if x = newCap % 16; x != 0 {
x = newCap / 16
newCap = 16 * (x + 1)
}
}
return
}
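A worked pass through growCap (hand-computed, not part of the vendored file), assuming a byte slice (unit = 1) of capacity 2048 growing by one element:

package main

import "fmt"

func main() {
	// unit = 1, so t1, t2, t3 = 4096, 8192, 16384; oldCap = 2048 <= t1, so x = 8 (a 2x step).
	newCap := 8*2048/4 + 1 // 4097 after adding num = 1
	// newCap > 64 and 4097 % 64 != 0, so it is rounded up to the next multiple of 64.
	newCap = 64 * (newCap/64 + 1) // 4160
	fmt.Println(newCap) // prints: 4160
}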


@@ -1,272 +0,0 @@
// +build !go1.7 safe appengine
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
"sync/atomic"
"time"
)
const safeMode = true
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
//
// Usage: Always maintain a reference to v while result of this call is in use,
// and call keepAlive4BytesView(v) at point where done with view.
func stringView(v []byte) string {
return string(v)
}
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
//
// Usage: Always maintain a reference to v while result of this call is in use,
// and call keepAlive4BytesView(v) at point where done with view.
func bytesView(v string) []byte {
return []byte(v)
}
func definitelyNil(v interface{}) bool {
// this is a best-effort option.
// We just return false, so we don't unnecessarily incur the cost of reflection this early.
return false
}
func rv2i(rv reflect.Value) interface{} {
return rv.Interface()
}
func rt2id(rt reflect.Type) uintptr {
return reflect.ValueOf(rt).Pointer()
}
func rv2rtid(rv reflect.Value) uintptr {
return reflect.ValueOf(rv.Type()).Pointer()
}
func i2rtid(i interface{}) uintptr {
return reflect.ValueOf(reflect.TypeOf(i)).Pointer()
}
// --------------------------
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
if deref {
if v.IsNil() {
return true
}
return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
}
return v.IsNil()
case reflect.Struct:
return isEmptyStruct(v, tinfos, deref, checkStruct)
}
return false
}
// --------------------------
// type ptrToRvMap struct{}
// func (*ptrToRvMap) init() {}
// func (*ptrToRvMap) get(i interface{}) reflect.Value {
// return reflect.ValueOf(i).Elem()
// }
// --------------------------
type atomicTypeInfoSlice struct { // expected to be 2 words
v atomic.Value
}
func (x *atomicTypeInfoSlice) load() []rtid2ti {
i := x.v.Load()
if i == nil {
return nil
}
return i.([]rtid2ti)
}
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
x.v.Store(p)
}
// --------------------------
func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
rv.SetBytes(d.rawBytes())
}
func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
rv.SetString(d.d.DecodeString())
}
func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
rv.SetBool(d.d.DecodeBool())
}
func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
rv.Set(reflect.ValueOf(d.d.DecodeTime()))
}
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
fv := d.d.DecodeFloat64()
if chkOvf.Float32(fv) {
d.errorf("float32 overflow: %v", fv)
}
rv.SetFloat(fv)
}
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
rv.SetFloat(d.d.DecodeFloat64())
}
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
}
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8))
}
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16))
}
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32))
}
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(d.d.DecodeInt64())
}
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
}
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
}
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8))
}
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16))
}
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32))
}
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint64())
}
// ----------------
func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeBool(rv.Bool())
}
func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeTime(rv2i(rv).(time.Time))
}
func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeString(cUTF8, rv.String())
}
func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeFloat64(rv.Float())
}
func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeFloat32(float32(rv.Float()))
}
func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
// //
// // Usage: call this at point where done with the bytes view.
// func keepAlive4BytesView(v string) {}
// // keepAlive4BytesView maintains a reference to the input parameter for stringView.
// //
// // Usage: call this at point where done with the string view.
// func keepAlive4StringView(v []byte) {}
// func definitelyNil(v interface{}) bool {
// rv := reflect.ValueOf(v)
// switch rv.Kind() {
// case reflect.Invalid:
// return true
// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
// return rv.IsNil()
// default:
// return false
// }
// }


@@ -1,639 +0,0 @@
// +build !safe
// +build !appengine
// +build go1.7
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
"sync/atomic"
"time"
"unsafe"
)
// This file has unsafe variants of some helper methods.
// NOTE: See helper_not_unsafe.go for the usage information.
// var zeroRTv [4]uintptr
const safeMode = false
const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
type unsafeString struct {
Data unsafe.Pointer
Len int
}
type unsafeSlice struct {
Data unsafe.Pointer
Len int
Cap int
}
type unsafeIntf struct {
typ unsafe.Pointer
word unsafe.Pointer
}
type unsafeReflectValue struct {
typ unsafe.Pointer
ptr unsafe.Pointer
flag uintptr
}
func stringView(v []byte) string {
if len(v) == 0 {
return ""
}
bx := (*unsafeSlice)(unsafe.Pointer(&v))
return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len}))
}
func bytesView(v string) []byte {
if len(v) == 0 {
return zeroByteSlice
}
sx := (*unsafeString)(unsafe.Pointer(&v))
return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len}))
}
func definitelyNil(v interface{}) bool {
// There is no global way of checking if an interface is nil.
// For true references (map, ptr, func, chan), you can just look
// at the word of the interface. However, for slices, you have to dereference
// the word, and get a pointer to the 3-word interface value.
//
// However, the following are cheap calls
// - TypeOf(interface): cheap 2-line call.
// - ValueOf(interface{}): expensive
// - type.Kind: cheap call through an interface
// - Value.Type(): cheap call
// except it's a method value (e.g. r.Read, which implies that it is a Func)
return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
}
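The nil-detection concern definitelyNil addresses above comes from how Go interfaces hold typed nils; a minimal standalone illustration of that behavior (it does not use this package):

package main

import "fmt"

func main() {
	var p *int            // nil pointer
	var i interface{} = p // interface now holds (type = *int, value = nil)
	fmt.Println(i == nil) // prints: false — the interface itself is non-nil
}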
func rv2i(rv reflect.Value) interface{} {
// TODO: consider a more generally-known optimization for reflect.Value ==> Interface
//
// Currently, we use this fragile method that taps into implementation details from
// the source go stdlib reflect/value.go, and trims the implementation.
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
var ptr unsafe.Pointer
if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
ptr = *(*unsafe.Pointer)(urv.ptr)
} else {
ptr = urv.ptr
}
return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
}
func rt2id(rt reflect.Type) uintptr {
return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
}
func rv2rtid(rv reflect.Value) uintptr {
return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
}
func i2rtid(i interface{}) uintptr {
return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
}
// --------------------------
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
if urv.flag == 0 {
return true
}
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.String:
return (*unsafeString)(urv.ptr).Len == 0
case reflect.Slice:
return (*unsafeSlice)(urv.ptr).Len == 0
case reflect.Bool:
return !*(*bool)(urv.ptr)
case reflect.Int:
return *(*int)(urv.ptr) == 0
case reflect.Int8:
return *(*int8)(urv.ptr) == 0
case reflect.Int16:
return *(*int16)(urv.ptr) == 0
case reflect.Int32:
return *(*int32)(urv.ptr) == 0
case reflect.Int64:
return *(*int64)(urv.ptr) == 0
case reflect.Uint:
return *(*uint)(urv.ptr) == 0
case reflect.Uint8:
return *(*uint8)(urv.ptr) == 0
case reflect.Uint16:
return *(*uint16)(urv.ptr) == 0
case reflect.Uint32:
return *(*uint32)(urv.ptr) == 0
case reflect.Uint64:
return *(*uint64)(urv.ptr) == 0
case reflect.Uintptr:
return *(*uintptr)(urv.ptr) == 0
case reflect.Float32:
return *(*float32)(urv.ptr) == 0
case reflect.Float64:
return *(*float64)(urv.ptr) == 0
case reflect.Interface:
isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
if deref {
if isnil {
return true
}
return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
}
return isnil
case reflect.Ptr:
// isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type)
isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
if deref {
if isnil {
return true
}
return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
}
return isnil
case reflect.Struct:
return isEmptyStruct(v, tinfos, deref, checkStruct)
case reflect.Map, reflect.Array, reflect.Chan:
return v.Len() == 0
}
return false
}
// --------------------------
// atomicTypeInfoSlice contains length and pointer to the array for a slice.
// It is expected to be 2 words.
//
// Previously, we atomically loaded and stored the length and array pointer separately,
// which could lead to some races.
// We now just atomically store and load the pointer to the value directly.
type atomicTypeInfoSlice struct { // expected to be 2 words
l int // length of the data array (must be first in struct, for 64-bit alignment necessary for 386)
v unsafe.Pointer // data array - Pointer (not uintptr) to maintain GC reference
}
func (x *atomicTypeInfoSlice) load() []rtid2ti {
xp := unsafe.Pointer(x)
x2 := *(*atomicTypeInfoSlice)(atomic.LoadPointer(&xp))
if x2.l == 0 {
return nil
}
return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: x2.v, Len: x2.l, Cap: x2.l}))
}
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
s := (*unsafeSlice)(unsafe.Pointer(&p))
xp := unsafe.Pointer(x)
atomic.StorePointer(&xp, unsafe.Pointer(&atomicTypeInfoSlice{l: s.Len, v: s.Data}))
}
// --------------------------
func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*[]byte)(urv.ptr) = d.rawBytes()
}
func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*string)(urv.ptr) = d.d.DecodeString()
}
func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*bool)(urv.ptr) = d.d.DecodeBool()
}
func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*time.Time)(urv.ptr) = d.d.DecodeTime()
}
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
fv := d.d.DecodeFloat64()
if chkOvf.Float32(fv) {
d.errorf("float32 overflow: %v", fv)
}
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float32)(urv.ptr) = float32(fv)
}
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float64)(urv.ptr) = d.d.DecodeFloat64()
}
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
}
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
}
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
}
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
}
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int64)(urv.ptr) = d.d.DecodeInt64()
}
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
}
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
}
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
}
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
}
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
}
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint64)(urv.ptr) = d.d.DecodeUint64()
}
// ------------
func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeBool(*(*bool)(v.ptr))
}
func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeTime(*(*time.Time)(v.ptr))
}
func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeString(cUTF8, *(*string)(v.ptr))
}
func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeFloat64(*(*float64)(v.ptr))
}
func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeFloat32(*(*float32)(v.ptr))
}
func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int)(v.ptr)))
}
func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int8)(v.ptr)))
}
func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int16)(v.ptr)))
}
func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int32)(v.ptr)))
}
func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int64)(v.ptr)))
}
func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint)(v.ptr)))
}
func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint8)(v.ptr)))
}
func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint16)(v.ptr)))
}
func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint32)(v.ptr)))
}
func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint64)(v.ptr)))
}
func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uintptr)(v.ptr)))
}
// ------------
// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // if urv.flag&unsafeFlagIndir != 0 {
// // urv.ptr = *(*unsafe.Pointer)(urv.ptr)
// // }
// *(*[]byte)(urv.ptr) = d.rawBytes()
// }
// func rv0t(rt reflect.Type) reflect.Value {
// ut := (*unsafeIntf)(unsafe.Pointer(&rt))
// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
// return *(*reflect.Value)(unsafe.Pointer(&uv})
// }
// func rv2i(rv reflect.Value) interface{} {
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
// var ptr unsafe.Pointer
// // kk := reflect.Kind(urv.flag & (1<<5 - 1))
// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 {
// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
// ptr = *(*unsafe.Pointer)(urv.ptr)
// } else {
// ptr = urv.ptr
// }
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// }
// func definitelyNil(v interface{}) bool {
// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v))
// if ui.word == nil {
// return true
// }
// var tk = reflect.TypeOf(v).Kind()
// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil
// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n",
// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil)
// }
// func keepAlive4BytesView(v string) {
// runtime.KeepAlive(v)
// }
// func keepAlive4StringView(v []byte) {
// runtime.KeepAlive(v)
// }
// func rt2id(rt reflect.Type) uintptr {
// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
// // var i interface{} = rt
// // // ui := (*unsafeIntf)(unsafe.Pointer(&i))
// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word
// }
// func rv2i(rv reflect.Value) interface{} {
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // non-reference type: already indir
// // reference type: depend on flagIndir property ('cos maybe was double-referenced)
// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 )
// // rvk := reflect.Kind(urv.flag & (1<<5 - 1))
// // if (rvk == reflect.Chan ||
// // rvk == reflect.Func ||
// // rvk == reflect.Interface ||
// // rvk == reflect.Map ||
// // rvk == reflect.Ptr ||
// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 {
// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// // }
// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 {
// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// }
// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type())
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// }
// const (
// unsafeRvFlagKindMask = 1<<5 - 1
// unsafeRvKindDirectIface = 1 << 5
// unsafeRvFlagIndir = 1 << 7
// unsafeRvFlagAddr = 1 << 8
// unsafeRvFlagMethod = 1 << 9
// _USE_RV_INTERFACE bool = false
// _UNSAFE_RV_DEBUG = true
// )
// type unsafeRtype struct {
// _ [2]uintptr
// _ uint32
// _ uint8
// _ uint8
// _ uint8
// kind uint8
// _ [2]uintptr
// _ int32
// }
// func _rv2i(rv reflect.Value) interface{} {
// // Note: From use,
// // - it's never an interface
// // - the only calls here are for ifaceIndir types.
// // (though that conditional is wrong)
// // To know for sure, we need the value of t.kind (which is not exposed).
// //
// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct)
// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string
// // - Type Direct, Value indirect: ==> map???
// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map
// //
// // TRANSLATES TO:
// // if typeIndirect { } else if valueIndirect { } else { }
// //
// // Since we don't deal with funcs, then "flagMethod" is unset, and can be ignored.
// if _USE_RV_INTERFACE {
// return rv.Interface()
// }
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
// // println("***** IS flag method or interface: delegating to rv.Interface()")
// // return rv.Interface()
// // }
// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
// // println("***** IS Interface: delegate to rv.Interface")
// // return rv.Interface()
// // }
// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 {
// // if urv.flag&unsafeRvFlagAddr == 0 {
// // println("***** IS ifaceIndir typ")
// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ}
// // // return *(*interface{})(unsafe.Pointer(&ui))
// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// // }
// // } else if urv.flag&unsafeRvFlagIndir != 0 {
// // println("***** IS flagindir")
// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// // } else {
// // println("***** NOT flagindir")
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// // }
// // println("***** default: delegate to rv.Interface")
// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ))
// if _UNSAFE_RV_DEBUG {
// fmt.Printf(">>>> start: %v: ", rv.Type())
// fmt.Printf("%v - %v\n", *urv, *urt)
// }
// if urt.kind&unsafeRvKindDirectIface == 0 {
// if _UNSAFE_RV_DEBUG {
// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type())
// }
// // println("***** IS ifaceIndir typ")
// // if true || urv.flag&unsafeRvFlagAddr == 0 {
// // // println(" ***** IS NOT addr")
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// // }
// } else if urv.flag&unsafeRvFlagIndir != 0 {
// if _UNSAFE_RV_DEBUG {
// fmt.Printf("**** +flagIndir type: %v\n", rv.Type())
// }
// // println("***** IS flagindir")
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// } else {
// if _UNSAFE_RV_DEBUG {
// fmt.Printf("**** -flagIndir type: %v\n", rv.Type())
// }
// // println("***** NOT flagindir")
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// }
// // println("***** default: delegating to rv.Interface()")
// // return rv.Interface()
// }
// var staticM0 = make(map[string]uint64)
// var staticI0 = (int32)(-5)
// func staticRv2iTest() {
// i0 := (int32)(-5)
// m0 := make(map[string]uint16)
// m0["1"] = 1
// for _, i := range []interface{}{
// (int)(7),
// (uint)(8),
// (int16)(-9),
// (uint16)(19),
// (uintptr)(77),
// (bool)(true),
// float32(-32.7),
// float64(64.9),
// complex(float32(19), 5),
// complex(float64(-32), 7),
// [4]uint64{1, 2, 3, 4},
// (chan<- int)(nil), // chan,
// rv2i, // func
// io.Writer(ioutil.Discard),
// make(map[string]uint),
// (map[string]uint)(nil),
// staticM0,
// m0,
// &m0,
// i0,
// &i0,
// &staticI0,
// &staticM0,
// []uint32{6, 7, 8},
// "abc",
// Raw{},
// RawExt{},
// &Raw{},
// &RawExt{},
// unsafe.Pointer(&i0),
// } {
// i2 := rv2i(reflect.ValueOf(i))
// eq := reflect.DeepEqual(i, i2)
// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq)
// }
// // os.Exit(0)
// }
// func init() {
// staticRv2iTest()
// }
// func rv2i(rv reflect.Value) interface{} {
// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() {
// return rv.Interface()
// }
// // var i interface{}
// // ui := (*unsafeIntf)(unsafe.Pointer(&i))
// var ui unsafeIntf
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr))
// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 {
// if urv.flag&unsafeRvFlagAddr != 0 {
// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()")
// return rv.Interface()
// }
// println("****** indirect type/kind")
// ui.word = urv.ptr
// } else if urv.flag&unsafeRvFlagIndir != 0 {
// println("****** unsafe rv flag indir")
// ui.word = *(*unsafe.Pointer)(urv.ptr)
// } else {
// println("****** default: assign prt to word directly")
// ui.word = urv.ptr
// }
// // ui.word = urv.ptr
// ui.typ = urv.typ
// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word)
// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word))
// return *(*interface{})(unsafe.Pointer(&ui))
// // return i
// }

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,232 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"bufio"
"errors"
"io"
"net/rpc"
"sync"
)
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}
// RPCOptions holds options specific to rpc functionality
type RPCOptions struct {
// RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls.
//
// Set RPCNoBuffer=true to turn buffering off.
// Buffering can still be done if buffered connections are passed in, or
// buffering is configured on the handle.
RPCNoBuffer bool
}
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
c io.Closer
r io.Reader
w io.Writer
f ioFlusher
dec *Decoder
enc *Encoder
// bw *bufio.Writer
// br *bufio.Reader
mu sync.Mutex
h Handle
cls bool
clsmu sync.RWMutex
clsErr error
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
// return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h)
return newRPCCodec2(conn, conn, conn, h)
}
func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
// defensive: ensure that jsonH has TermWhitespace turned on.
if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace {
panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true"))
}
// always ensure that we use a flusher, and always flush what was written to the connection.
// we lose nothing by using a buffered writer internally.
f, ok := w.(ioFlusher)
bh := h.getBasicHandle()
if !bh.RPCNoBuffer {
if bh.WriterBufferSize <= 0 {
if !ok {
bw := bufio.NewWriter(w)
f, w = bw, bw
}
}
if bh.ReaderBufferSize <= 0 {
if _, ok = w.(ioPeeker); !ok {
if _, ok = w.(ioBuffered); !ok {
br := bufio.NewReader(r)
r = br
}
}
}
}
return rpcCodec{
c: c,
w: w,
r: r,
f: f,
h: h,
enc: NewEncoder(w, h),
dec: NewDecoder(r, h),
}
}
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) {
if c.isClosed() {
return c.clsErr
}
err = c.enc.Encode(obj1)
if err == nil {
if writeObj2 {
err = c.enc.Encode(obj2)
}
// if err == nil && c.f != nil {
// err = c.f.Flush()
// }
}
if c.f != nil {
if err == nil {
err = c.f.Flush()
} else {
_ = c.f.Flush() // swallow flush error, so we maintain prior error on write
}
}
return
}
func (c *rpcCodec) swallow(err *error) {
defer panicToErr(c.dec, err)
c.dec.swallow()
}
func (c *rpcCodec) read(obj interface{}) (err error) {
if c.isClosed() {
return c.clsErr
}
//If nil is passed in, we should read and discard
if obj == nil {
// var obj2 interface{}
// return c.dec.Decode(&obj2)
c.swallow(&err)
return
}
return c.dec.Decode(obj)
}
func (c *rpcCodec) isClosed() (b bool) {
if c.c != nil {
c.clsmu.RLock()
b = c.cls
c.clsmu.RUnlock()
}
return
}
func (c *rpcCodec) Close() error {
if c.c == nil || c.isClosed() {
return c.clsErr
}
c.clsmu.Lock()
c.cls = true
c.clsErr = c.c.Close()
c.clsmu.Unlock()
return c.clsErr
}
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
type goRpcCodec struct {
rpcCodec
}
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// Must protect for concurrent access as per API
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true)
}
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true)
}
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
// goRpc is the implementation of Rpc that uses the communication protocol
// as defined in net/rpc package.
type goRpc struct{}
// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
//
// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered.
//
// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle.
// This ensures we use an adequate buffer during reading and writing.
// If not configured, we will internally initialize and use a buffer during reads and writes.
// This can be turned off via the RPCNoBuffer option on the Handle.
// var handle codec.JsonHandle
// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer
//
// Example 1: one way of configuring buffering explicitly:
// var handle codec.JsonHandle // codec handle
// handle.ReaderBufferSize = 1024
// handle.WriterBufferSize = 1024
// var conn io.ReadWriteCloser // connection got from a socket
// var serverCodec = GoRpc.ServerCodec(conn, handle)
// var clientCodec = GoRpc.ClientCodec(conn, handle)
//
// Example 2: you can also explicitly create a buffered connection yourself,
// and not worry about configuring the buffer sizes in the Handle.
// var handle codec.Handle // codec handle
// var conn io.ReadWriteCloser // connection got from a socket
// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser
// io.Closer
// *bufio.Reader
// *bufio.Writer
// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)}
// var serverCodec = GoRpc.ServerCodec(bufconn, handle)
// var clientCodec = GoRpc.ClientCodec(bufconn, handle)
//
var GoRpc goRpc
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
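The doc comment above can be turned into an end-to-end sketch; the service type, address handling, and the choice of MsgpackHandle below are illustrative assumptions, not part of this file:

package main

import (
	"fmt"
	"net"
	"net/rpc"

	"github.com/ugorji/go/codec"
)

// Args and Arith form a hypothetical net/rpc service used only for this sketch.
type Args struct{ A, B int }
type Arith int

func (t *Arith) Multiply(args *Args, reply *int) error { *reply = args.A * args.B; return nil }

func main() {
	var h codec.MsgpackHandle // any Handle works; Msgpack is just an example choice

	rpc.Register(new(Arith))
	ln, _ := net.Listen("tcp", "127.0.0.1:0")
	go func() {
		conn, _ := ln.Accept()
		rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, &h)) // ServerCodec shown above
	}()

	conn, _ := net.Dial("tcp", ln.Addr().String())
	client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, &h)) // ClientCodec shown above
	defer client.Close()

	var reply int
	_ = client.Call("Arith.Multiply", &Args{A: 6, B: 7}, &reply)
	fmt.Println(reply) // prints: 42
}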


@@ -1,652 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math"
"reflect"
"time"
)
const (
_ uint8 = iota
simpleVdNil = 1
simpleVdFalse = 2
simpleVdTrue = 3
simpleVdFloat32 = 4
simpleVdFloat64 = 5
// each lasts for 4 (ie n, n+1, n+2, n+3)
simpleVdPosInt = 8
simpleVdNegInt = 12
simpleVdTime = 24
// containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
simpleVdString = 216
simpleVdByteArray = 224
simpleVdArray = 232
simpleVdMap = 240
simpleVdExt = 248
)
type simpleEncDriver struct {
noBuiltInTypes
// encNoSeparator
e *Encoder
h *SimpleHandle
w encWriter
b [8]byte
// c containerState
encDriverTrackContainerWriter
// encDriverNoopContainerWriter
_ [2]uint64 // padding
}
func (e *simpleEncDriver) EncodeNil() {
e.w.writen1(simpleVdNil)
}
func (e *simpleEncDriver) EncodeBool(b bool) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b {
e.EncodeNil()
return
}
if b {
e.w.writen1(simpleVdTrue)
} else {
e.w.writen1(simpleVdFalse)
}
}
func (e *simpleEncDriver) EncodeFloat32(f float32) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.w.writen1(simpleVdFloat32)
bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *simpleEncDriver) EncodeFloat64(f float64) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.w.writen1(simpleVdFloat64)
bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
}
func (e *simpleEncDriver) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-v), simpleVdNegInt)
} else {
e.encUint(uint64(v), simpleVdPosInt)
}
}
func (e *simpleEncDriver) EncodeUint(v uint64) {
e.encUint(v, simpleVdPosInt)
}
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 {
e.EncodeNil()
return
}
if v <= math.MaxUint8 {
e.w.writen2(bd, uint8(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd + 1)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd + 2)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
} else { // if v <= math.MaxUint64 {
e.w.writen1(bd + 3)
bigenHelper{e.b[:8], e.w}.writeUint64(v)
}
}
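A small round-trip sketch of the byte layout encUint produces under the Simple format constants above (a hand-written illustration; it assumes the package's exported SimpleHandle and NewEncoderBytes/NewDecoderBytes APIs):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.SimpleHandle
	var b []byte

	// 5 fits in one byte, so encUint writes [simpleVdPosInt, 5] = [0x08, 0x05].
	if err := codec.NewEncoderBytes(&b, &h).Encode(uint64(5)); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // prints: 08 05

	var v uint64
	if err := codec.NewDecoderBytes(b, &h).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(v) // prints: 5
}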
func (e *simpleEncDriver) encLen(bd byte, length int) {
if length == 0 {
e.w.writen1(bd)
} else if length <= math.MaxUint8 {
e.w.writen1(bd + 1)
e.w.writen1(uint8(length))
} else if length <= math.MaxUint16 {
e.w.writen1(bd + 2)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
} else if int64(length) <= math.MaxUint32 {
e.w.writen1(bd + 3)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
} else {
e.w.writen1(bd + 4)
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
}
}
func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(rv)
if bs == nil {
e.EncodeNil()
return
}
e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs)
}
func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.w.writeb(re.Data)
}
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
e.encLen(simpleVdExt, length)
e.w.writen1(xtag)
}
func (e *simpleEncDriver) WriteArrayStart(length int) {
e.c = containerArrayStart
e.encLen(simpleVdArray, length)
}
func (e *simpleEncDriver) WriteMapStart(length int) {
e.c = containerMapStart
e.encLen(simpleVdMap, length)
}
func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" {
e.EncodeNil()
return
}
e.encLen(simpleVdString, len(v))
e.w.writestr(v)
}
// func (e *simpleEncDriver) EncodeSymbol(v string) {
// e.EncodeString(cUTF8, v)
// }
func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
if v == nil {
e.EncodeNil()
return
}
e.encLen(simpleVdByteArray, len(v))
e.w.writeb(v)
}
func (e *simpleEncDriver) EncodeTime(t time.Time) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() {
if t.IsZero() {
e.EncodeNil()
return
}
v, err := t.MarshalBinary()
if err != nil {
e.e.errorv(err)
return
}
// time.Time marshalbinary takes about 14 bytes.
e.w.writen2(simpleVdTime, uint8(len(v)))
e.w.writeb(v)
}
//------------------------------------
type simpleDecDriver struct {
d *Decoder
h *SimpleHandle
r decReader
bdRead bool
bd byte
br bool // a bytes reader?
c containerState
// b [scratchByteArrayLen]byte
noBuiltInTypes
// noStreamingCodec
decDriverNoopContainerReader
_ [3]uint64 // padding
}
func (d *simpleDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
}
func (d *simpleDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *simpleDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdNil:
return valueTypeNil
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
return valueTypeBytes
case simpleVdString, simpleVdString + 1,
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
return valueTypeString
case simpleVdArray, simpleVdArray + 1,
simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
return valueTypeArray
case simpleVdMap, simpleVdMap + 1,
simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
return valueTypeMap
// case simpleVdTime:
// return valueTypeTime
}
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset
}
func (d *simpleDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return true
}
return false
}
func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdPosInt:
ui = uint64(d.r.readn1())
case simpleVdPosInt + 1:
ui = uint64(bigen.Uint16(d.r.readx(2)))
case simpleVdPosInt + 2:
ui = uint64(bigen.Uint32(d.r.readx(4)))
case simpleVdPosInt + 3:
ui = uint64(bigen.Uint64(d.r.readx(8)))
case simpleVdNegInt:
ui = uint64(d.r.readn1())
neg = true
case simpleVdNegInt + 1:
ui = uint64(bigen.Uint16(d.r.readx(2)))
neg = true
case simpleVdNegInt + 2:
ui = uint64(bigen.Uint32(d.r.readx(4)))
neg = true
case simpleVdNegInt + 3:
ui = uint64(bigen.Uint64(d.r.readx(8)))
neg = true
default:
d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
return
}
// don't do this check, because callers may only want the unsigned value.
// if ui > math.MaxInt64 {
// d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
// return
// }
return
}
func (d *simpleDecDriver) DecodeInt64() (i int64) {
ui, neg := d.decCheckInteger()
i = chkOvf.SignedIntV(ui)
if neg {
i = -i
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("assigning negative signed value to unsigned type")
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeFloat64() (f float64) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdFloat32 {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if d.bd == simpleVdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else {
if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
f = float64(d.DecodeInt64())
} else {
d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd)
return
}
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *simpleDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdTrue {
b = true
} else if d.bd == simpleVdFalse {
} else {
d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) ReadMapStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
d.c = containerMapStart
return d.decLen()
}
func (d *simpleDecDriver) ReadArrayStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
d.c = containerArrayStart
return d.decLen()
}
func (d *simpleDecDriver) ReadArrayElem() {
d.c = containerArrayElem
}
func (d *simpleDecDriver) ReadArrayEnd() {
d.c = containerArrayEnd
}
func (d *simpleDecDriver) ReadMapElemKey() {
d.c = containerMapKey
}
func (d *simpleDecDriver) ReadMapElemValue() {
d.c = containerMapValue
}
func (d *simpleDecDriver) ReadMapEnd() {
d.c = containerMapEnd
}
func (d *simpleDecDriver) decLen() int {
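	// Inferred length scheme: the container base descriptors are multiples of 8, so
	// bd%8 selects the variant: 0 => zero length (nothing follows), 1 => 1-byte length,
	// 2 => 2-byte, 3 => 4-byte, 4 => 8-byte big-endian length after the descriptor.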
switch d.bd % 8 {
case 0:
return 0
case 1:
return int(d.r.readn1())
case 2:
return int(bigen.Uint16(d.r.readx(2)))
case 3:
ui := uint64(bigen.Uint32(d.r.readx(4)))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui)
return 0
}
return int(ui)
case 4:
ui := bigen.Uint64(d.r.readx(8))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui)
return 0
}
return int(ui)
}
d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}
func (d *simpleDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.d.b[:], true))
}
func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.d.b[:], true)
}
func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return
}
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 {
if len(bs) == 0 && zerocopy {
bs = d.d.b[:]
}
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
return
}
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.d.b[:]
}
}
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *simpleDecDriver) DecodeTime() (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return
}
if d.bd != simpleVdTime {
d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
return
}
d.bdRead = false
clen := int(d.r.readn1())
b := d.r.readx(clen)
if err := (&t).UnmarshalBinary(b); err != nil {
d.d.errorv(err)
}
return
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff {
d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
return
}
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
realxtag = uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
} else {
ext.ReadExt(rv, xbs)
}
return
}
func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
l := d.decLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
xbs = d.r.readx(l)
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs = d.DecodeBytes(nil, true)
default:
d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
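// DecodeNaked decodes the next value into the Decoder's naked-value holder (d.d.n),
// inferring the value type solely from the descriptor byte.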
func (d *simpleDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
n := d.d.n
var decodeFurther bool
switch d.bd {
case simpleVdNil:
n.v = valueTypeNil
case simpleVdFalse:
n.v = valueTypeBool
n.b = false
case simpleVdTrue:
n.v = valueTypeBool
n.b = true
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
if d.h.SignedInteger {
n.v = valueTypeInt
n.i = d.DecodeInt64()
} else {
n.v = valueTypeUint
n.u = d.DecodeUint64()
}
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
n.v = valueTypeInt
n.i = d.DecodeInt64()
case simpleVdFloat32:
n.v = valueTypeFloat
n.f = d.DecodeFloat64()
case simpleVdFloat64:
n.v = valueTypeFloat
n.f = d.DecodeFloat64()
case simpleVdTime:
n.v = valueTypeTime
n.t = d.DecodeTime()
case simpleVdString, simpleVdString + 1,
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
n.v = valueTypeString
n.s = d.DecodeString()
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
n.v = valueTypeExt
l := d.decLen()
n.u = uint64(d.r.readn1())
n.l = d.r.readx(l)
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2,
simpleVdArray + 3, simpleVdArray + 4:
n.v = valueTypeArray
decodeFurther = true
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
}
if !decodeFurther {
d.bdRead = false
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Length of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded.
// For others, the number of bytes is given by pow(2, (bd%8)-1), i.e. 1, 2, 4 or 8 bytes (see decLen).
// - maps are encoded as [bd] [length] [[key][value]]...
// - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]...
// - time.Time are encoded as [bd] [length] [byte]...
//
// The full spec will be published soon.
type SimpleHandle struct {
BasicHandle
binaryEncodingType
noElemSeparators
// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
EncZeroValuesAsNil bool
// _ [1]uint64 // padding
}
// Name returns the name of the handle: simple
func (h *SimpleHandle) Name() string { return "simple" }
// SetBytesExt sets an extension
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
}
func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX
func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
return &simpleEncDriver{e: e, w: e.w, h: h}
}
func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
func (e *simpleEncDriver) reset() {
e.c = 0
e.w = e.e.w
}
func (d *simpleDecDriver) reset() {
d.c = 0
d.r, d.br = d.d.r, d.d.bytes
d.bd, d.bdRead = 0, false
}
var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)

View file

@ -1,508 +0,0 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build ignore
package codec
import "reflect"
/*
A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder.
We are attempting this due to perceived issues with encoding/xml:
- Complicated. It tried to do too much, and is not as simple to use as json.
- Due to over-engineering, reflection is over-used AND performance suffers:
java is 6X faster: http://fabsk.eu/blog/category/informatique/dev/golang/
even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html
codec framework will offer the following benefits
- VASTLY improved performance (when using reflection-mode or codecgen)
- simplicity and consistency: with the rest of the supported formats
- all other benefits of codec framework (streaming, codegeneration, etc)
codec is not a drop-in replacement for encoding/xml.
It is a replacement, based on the simplicity and performance of codec.
Look at it like JAXB for Go.
Challenges:
- Need to output XML preamble, with all namespaces at the right location in the output.
- Each "end" block is dynamic, so we need to maintain a context-aware stack
- How to decide when to use an attribute VS an element
- How to handle chardata, attr, comment EXPLICITLY.
- Should it output fragments?
e.g. encoding a bool should just output true OR false, which is not well-formed XML.
Extend the struct tag. See representative example:
type X struct {
ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"`
// format: [namespace-uri ][namespace-prefix ]local-name, ...
}
Based on this, we encode
- fields as elements, BUT
encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string)
- text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
To handle namespaces:
- XMLHandle is denoted as being namespace-aware.
Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
- *Encoder and *Decoder know whether the Handle "prefers" namespaces.
- add *Encoder.getEncName(*structFieldInfo).
No one calls *structFieldInfo.indexForEncName directly anymore
- OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware
indexForEncName takes a parameter of the form namespace:local-name OR local-name
- add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
by being a method on *Decoder, or maybe a method on the Handle itself.
No one accesses .encName anymore
- let encode.go and decode.go use these (for consistency)
- only problem exists for gen.go, where we create a big switch on encName.
Now, we also have to add a switch on strings.endsWith(kName, encNsName)
- gen.go will need to have many more methods, and then double-on the 2 switch loops like:
switch k {
case "abc" : x.abc()
case "def" : x.def()
default {
switch {
case !nsAware: panic(...)
case strings.endsWith(":abc"): x.abc()
case strings.endsWith(":def"): x.def()
default: panic(...)
}
}
}
The structure below accommodates this:
type typeInfo struct {
sfi []*structFieldInfo // sorted by encName
sfins // sorted by namespace
sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately.
sfip // unsorted
}
type structFieldInfo struct {
encName
nsEncName
ns string
attr bool
cdata bool
}
indexForEncName is now an internal helper function that takes a sorted array
(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
There will be a separate parser from the builder.
The parser will have a next() xmlToken method. It has lookahead support,
so you can pop multiple tokens, make a determination, and push them back in the order popped.
This will be needed to determine whether we are "nakedly" decoding a container or not.
The stack will be implemented using a slice and push/pop happens at the [0] element.
xmlToken has fields:
- type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
- value string
- ns string
SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL
The following are skipped when parsing:
- External Entities (from external file)
- Notation Declaration e.g. <!NOTATION GIF87A SYSTEM "GIF">
- Entity Declarations & References
- XML Declaration (assume UTF-8)
- XML Directive i.e. <! ... >
- Other Declarations: Notation, etc.
- Comment
- Processing Instruction
- schema / DTD for validation:
We are not a VALIDATING parser. Validation is done elsewhere.
However, some parts of the DTD internal subset are used (SEE BELOW).
For Attribute List Declarations e.g.
<!ATTLIST foo:oldjoke name ID #REQUIRED label CDATA #IMPLIED status ( funny | notfunny ) 'funny' >
We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED)
The following XML features are supported
- Namespace
- Element
- Attribute
- cdata
- Unicode escape
The following DTD (when as an internal sub-set) features are supported:
- Internal Entities e.g.
<!ENTITY burns "ugorji is cool" > AND entities for the set: [<>&"']
- Parameter entities e.g.
<!ENTITY % personcontent "ugorji is cool"> <!ELEMENT burns (%personcontent;)*>
At decode time, a structure containing the following is kept
- namespace mapping
- default attribute values
- all internal entities (<>&"' and others written in the document)
When decode starts, it parses XML namespace declarations and creates a map in the
xmlDecDriver. While parsing, that map continuously gets updated.
The only problem happens when a namespace declaration happens on the node that it defines.
e.g. <hn:name xmlns:hn="http://www.ugorji.net" >
To handle this, each Element must be fully parsed at a time,
even if it amounts to multiple tokens which are returned one at a time on request.
xmlns is a special attribute name.
- It is used to define namespaces, including the default
- It is never returned as an AttrKey or AttrVal.
*We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.*
Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
This accommodates map[int]string for example.
It should be possible to create a schema from the types,
or vice versa (generate types from schema with appropriate tags).
This is however out-of-scope from this parsing project.
We should write all namespace information at the first point that it is referenced in the tree,
and use the mapping for all child nodes and attributes. This means that state is maintained
at a point in the tree. This also means that calls to Decode or MustDecode will reset some state.
When decoding, it is important to keep track of entity references and default attribute values.
It seems these can only be stored in the DTD components. We should honor them when decoding.
Configuration for XMLHandle will look like this:
XMLHandle
DefaultNS string
// Encoding:
NS map[string]string // ns URI to key, used for encoding
// Decoding: in case ENTITY declared in external schema or dtd, store info needed here
Entities map[string]string // map of entity rep to character
During encode, if a namespace mapping is not defined for a namespace found on a struct,
then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict
with any other namespace mapping).
Note that different fields in a struct can have different namespaces.
However, all fields will default to the namespace on the _struct field (if defined).
An XML document is a name, a map of attributes and a list of children.
Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
We have to "DecodeNaked" into something that resembles XML data.
To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types:
type Name struct { // Preferred. Less allocations due to conversions.
Local string
Space string
}
type Element struct {
Name Name
Attrs map[Name]string
Children []interface{} // each child is either *Element or string
}
Only two "supporting" types are exposed for XML: Name and Element.
// ------------------
We considered 'type Name string' where Name is like "Space Local" (space-separated).
We decided against it, because each creation of a name would lead to
double allocation (first convert []byte to string, then concatenate them into a string).
The benefit is that it is faster to read Attrs from a map. But given that Element is a value
object, we want to eschew methods and have public exposed variables.
We also considered the following, where xml types were not value objects, and we used
intelligent accessor methods to extract information and for performance.
*** WE DECIDED AGAINST THIS. ***
type Attr struct {
Name Name
Value string
}
// Element is a ValueObject: There are no accessor methods.
// Make element self-contained.
type Element struct {
Name Name
attrsMap map[string]string // where key is "Space Local"
attrs []Attr
childrenT []string
childrenE []Element
childrenI []int // each child is a index into T or E.
}
func (x *Element) child(i) interface{} // returns string or *Element
// ------------------
Per XML spec and our default handling, white space is always treated as
insignificant between elements, except in a text node. The xml:space='preserve'
attribute is ignored.
**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
**So treat them as just "directives" that should be interpreted to mean something**.
On encoding, we support indenting aka prettifying markup in the same way we support it for json.
A document or element can only be encoded/decoded from/to a struct. In this mode:
- struct name maps to element name (or tag-info from _struct field)
- fields are mapped to child elements or attributes
A map is either encoded as attributes on current element, or as a set of child elements.
Maps are encoded as attributes iff their keys and values are primitives (number, bool, string).
A list is encoded as a set of child elements.
Primitives (number, bool, string) are encoded as an element, attribute or text
depending on the context.
Extensions must encode themselves as a text string.
Encoding is tough, specifically when encoding mappings, because we need to encode
as either attribute or element. To do this, we need to default to encoding as attributes,
and then let Encoder inform the Handle when to start encoding as nodes.
i.e. Encoder does something like:
h.EncodeMapStart()
h.Encode(), h.Encode(), ...
h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal
h.Encode(), h.Encode(), ...
h.EncodeEnd()
Only XMLHandle understands this, and will set itself to start encoding as elements.
This support extends to maps. For example, if a struct field is a map, and it has
the struct tag signifying it should be attr, then all its fields are encoded as attributes.
e.g.
type X struct {
M map[string]int `codec:"m,attr"` // encode keys as attributes named
}
Question:
- if encoding a map, what if map keys have spaces in them???
Then they cannot be attributes or child elements. Error.
Options to consider adding later:
- For attribute values, normalize by trimming beginning and ending white space,
and converting every white space sequence to a single space.
- ATTLIST restrictions are enforced.
e.g. default value of xml:space, skipping xml:XYZ style attributes, etc.
- Consider supporting NON-STRICT mode (e.g. to handle HTML parsing).
Some elements e.g. br, hr, etc need not close and should be auto-closed
... (see http://www.w3.org/TR/html4/loose.dtd)
An expansive set of entities are pre-defined.
- Have easy way to create a HTML parser:
add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose,
and add HTML Entities to the list.
- Support validating element/attribute XMLName before writing it.
Keep this behind a flag, which is set to false by default (for performance).
type XMLHandle struct {
CheckName bool
}
Misc:
ROADMAP (1 weeks):
- build encoder (1 day)
- build decoder (based off xmlParser) (1 day)
- implement xmlParser (2 days).
Look at encoding/xml for inspiration.
- integrate and TEST (1 days)
- write article and post it (1 day)
// ---------- MORE NOTES FROM 2017-11-30 ------------
when parsing
- parse the attributes first
- then parse the nodes
basically:
- if encoding a field: we use the field name for the wrapper
- if encoding a non-field, then just use the element type name
map[string]string ==> <map><key>abc</key><value>val</value></map>... or
<map key="abc">val</map>... OR
<key1>val1</key1><key2>val2</key2>... <- PREFERRED
[]string ==> <string>v1</string><string>v2</string>...
string v1 ==> <string>v1</string>
bool true ==> <bool>true</bool>
float 1.0 ==> <float>1.0</float>
...
F1 map[string]string ==> <F1><key>abc</key><value>val</value></F1>... OR
<F1 key="abc">val</F1>... OR
<F1><abc>val</abc>...</F1> <- PREFERRED
F2 []string ==> <F2>v1</F2><F2>v2</F2>...
F3 bool ==> <F3>true</F3>
...
- a scalar is encoded as:
(value) of type T ==> <T><value/></T>
(value) of field F ==> <F><value/></F>
- A kv-pair is encoded as:
(key,value) ==> <map><key><value/></key></map> OR <map key="value">
(key,value) of field F ==> <F><key><value/></key></F> OR <F key="value">
- A map or struct is just a list of kv-pairs
- A list is encoded as sequences of same node e.g.
<F1 key1="value11">
<F1 key2="value12">
<F2>value21</F2>
<F2>value22</F2>
- we may have to singularize the field name, when entering into xml,
and pluralize them when encoding.
- bi-directional encode->decode->encode is not a MUST.
even encoding/xml cannot decode correctly what was encoded:
see https://play.golang.org/p/224V_nyhMS
func main() {
fmt.Println("Hello, playground")
v := []interface{}{"hello", 1, true, nil, time.Now()}
s, err := xml.Marshal(v)
fmt.Printf("err: %v, \ns: %s\n", err, s)
var v2 []interface{}
err = xml.Unmarshal(s, &v2)
fmt.Printf("err: %v, \nv2: %v\n", err, v2)
type T struct {
V []interface{}
}
v3 := T{V: v}
s, err = xml.Marshal(v3)
fmt.Printf("err: %v, \ns: %s\n", err, s)
var v4 T
err = xml.Unmarshal(s, &v4)
fmt.Printf("err: %v, \nv4: %v\n", err, v4)
}
Output:
err: <nil>,
s: <string>hello</string><int>1</int><bool>true</bool><Time>2009-11-10T23:00:00Z</Time>
err: <nil>,
v2: [<nil>]
err: <nil>,
s: <T><V>hello</V><V>1</V><V>true</V><V>2009-11-10T23:00:00Z</V></T>
err: <nil>,
v4: {[<nil> <nil> <nil> <nil>]}
-
*/
// ----------- PARSER -------------------
type xmlTokenType uint8
const (
_ xmlTokenType = iota << 1
xmlTokenElemStart
xmlTokenElemEnd
xmlTokenAttrKey
xmlTokenAttrVal
xmlTokenText
)
type xmlToken struct {
Type xmlTokenType
Value string
Namespace string // blank for AttrVal and Text
}
type xmlParser struct {
r decReader
toks []xmlToken // list of tokens.
ptr int // ptr into the toks slice
done bool // nothing else to parse. r now returns EOF.
}
func (x *xmlParser) next() (t *xmlToken) {
// once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish)
if !x.done && len(x.toks) == 0 {
x.nextTag()
}
// parses one element at a time (into possible many tokens)
if x.ptr < len(x.toks) {
t = &(x.toks[x.ptr])
x.ptr++
if x.ptr == len(x.toks) {
x.ptr = 0
x.toks = x.toks[:0]
}
}
return
}
// nextTag parses the next element and fills up toks.
// It sets the done flag once EOF is reached.
func (x *xmlParser) nextTag() {
// TODO: implement.
}
// ----------- ENCODER -------------------
type xmlEncDriver struct {
e *Encoder
w encWriter
h *XMLHandle
b [64]byte // scratch
bs []byte // scratch
// s jsonStack
noBuiltInTypes
}
// ----------- DECODER -------------------
type xmlDecDriver struct {
d *Decoder
h *XMLHandle
r decReader // *bytesDecReader decReader
ct valueType // container type. one of unset, array or map.
bstr [8]byte // scratch used for string \UXXX parsing
b [64]byte // scratch
// wsSkipped bool // whitespace skipped
// s jsonStack
noBuiltInTypes
}
// DecodeNaked will decode into an XMLNode
// XMLName is a value object representing a namespace-aware NAME
type XMLName struct {
Local string
Space string
}
// XMLNode represents a "union" of the different types of XML Nodes.
// Only one of the fields (Element or Text) is set.
type XMLNode struct {
	Element *XMLElement
Text string
}
// XMLElement is a value object representing a fully-parsed XML element.
type XMLElement struct {
	Name  XMLName
	Attrs map[XMLName]string
	// Children is a list of child nodes, each holding either an *XMLElement or a text string
Children []XMLNode
}
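// exampleNakedXMLNode is an illustrative sketch, not part of the upstream design
// file; the variable name is hypothetical. It shows how a naked decode of
// <hn:name xmlns:hn="http://www.ugorji.net">ugorji</hn:name> might be represented
// with the XMLName/XMLNode/XMLElement value objects above, assuming the decoder
// resolves the hn prefix to its namespace URI.
var exampleNakedXMLNode = XMLNode{
	Element: &XMLElement{
		Name:     XMLName{Local: "name", Space: "http://www.ugorji.net"},
		Attrs:    map[XMLName]string{},
		Children: []XMLNode{{Text: "ugorji"}},
	},
}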
// ----------- HANDLE -------------------
type XMLHandle struct {
BasicHandle
textEncodingType
DefaultNS string
NS map[string]string // ns URI to key, for encoding
Entities map[string]string // entity representation to string, for encoding.
}
func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
return &xmlEncDriver{e: e, w: e.w, h: h}
}
func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
// d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
hd := xmlDecDriver{d: d, r: d.r, h: h}
hd.n.bytes = d.b[:]
return &hd
}
func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
}
var _ decDriver = (*xmlDecDriver)(nil)
var _ encDriver = (*xmlEncDriver)(nil)